Message-Id: <1483519053-23402-3-git-send-email-bhe@redhat.com>
Date:   Wed,  4 Jan 2017 16:37:32 +0800
From:   Baoquan He <bhe@...hat.com>
To:     tglx@...utronix.de, hpa@...or.com, mingo@...hat.com
Cc:     linux-kernel@...r.kernel.org, x86@...nel.org,
        keescook@...omium.org, yinghai@...nel.org, bp@...e.de,
        thgarnie@...gle.com, kuleshovmail@...il.com, luto@...nel.org,
        mcgrof@...nel.org, anderson@...hat.com, dyoung@...hat.com,
        Baoquan He <bhe@...hat.com>
Subject: [PATCH v3 2/3] x86/64: Introduce a new constant KERNEL_MAPPING_SIZE

On x86, KERNEL_IMAGE_SIZE is used to limit the size of the running kernel
image, but it also represents the size of the kernel image mapping area.
This dual use is fine as long as the kernel virtual address is fixed within
the 512M area and the kernel image is no bigger than 512M.

With the addition of KASLR, the kernel mapping area on x86_64 is extended
by another 512M, to 1G. It is no longer appropriate to let
KERNEL_IMAGE_SIZE alone play both roles.
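
For context only (not part of the patch), here is a minimal user-space
sketch of the trade-off, assuming the usual x86_64 values
__START_KERNEL_map = 0xffffffff80000000 and MODULES_END = 0xffffffffff000000
(the latter appears in the pgtable_64_types.h hunk below): whatever the
kernel mapping gains, the modules area loses.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants, used only for this sketch. */
#define START_KERNEL_MAP	0xffffffff80000000UL
#define MODULES_END_ADDR	0xffffffffff000000UL

int main(void)
{
	unsigned long mapping_size[] = { 512UL << 20, 1UL << 30 };

	for (int i = 0; i < 2; i++) {
		/* MODULES_VADDR = __START_KERNEL_map + kernel mapping size */
		unsigned long modules_vaddr = START_KERNEL_MAP + mapping_size[i];
		unsigned long modules_len   = MODULES_END_ADDR - modules_vaddr;

		printf("mapping %4luM -> modules area %4luM\n",
		       mapping_size[i] >> 20, modules_len >> 20);
	}
	return 0;
}

Growing the mapping from 512M to 1G shrinks the modules area from roughly
1.5G to roughly 1G, which matches the "leaving 1.5GiB for modules" comment
in page_64_types.h below.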

So introduce a new constant, KERNEL_MAPPING_SIZE, to represent the size of
the kernel mapping area on x86_64, and let KERNEL_IMAGE_SIZE mean only what
its name says.

This patch only adds KERNEL_MAPPING_SIZE and replaces KERNEL_IMAGE_SIZE
with it in the relevant places. No functional change.
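
To summarise the end state, a sketch of the resulting definitions, taken
from the page_64_types.h hunk below:

/* asm/page_64_types.h after this patch (sketch) */
#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)	/* limit on kernel code+data+bss */

#define KERNEL_MAPPING_SIZE_EXT	(1024 * 1024 * 1024)	/* 1G mapping when KASLR is enabled */
#if defined(CONFIG_RANDOMIZE_BASE)
#define KERNEL_MAPPING_SIZE	KERNEL_MAPPING_SIZE_EXT
#else
#define KERNEL_MAPPING_SIZE	KERNEL_IMAGE_SIZE
#endif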

Signed-off-by: Baoquan He <bhe@...hat.com>
Acked-by: Kees Cook <keescook@...omium.org>
---
 arch/x86/boot/compressed/kaslr.c        | 20 +++++++++++++++-----
 arch/x86/include/asm/page_64_types.h    | 17 ++++++++++++-----
 arch/x86/include/asm/pgtable_64_types.h |  2 +-
 arch/x86/kernel/head_64.S               |  3 ++-
 arch/x86/mm/physaddr.c                  |  6 +++---
 5 files changed, 33 insertions(+), 15 deletions(-)

diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index a66854d..6ded03b 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -22,6 +22,12 @@
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
 		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
 
+/*
+ * By default, the size of the kernel text mapping equals KERNEL_IMAGE_SIZE.
+ * On x86_64 it may be extended to 1G if KASLR is enabled.
+ */
+static unsigned long _kernel_mapping_size = KERNEL_IMAGE_SIZE;
+
 static unsigned long rotate_xor(unsigned long hash, const void *area,
 				size_t size)
 {
@@ -311,7 +317,7 @@ static void process_e820_entry(struct e820entry *entry,
 		return;
 
 	/* On 32-bit, ignore entries entirely above our maximum. */
-	if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE)
+	if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= _kernel_mapping_size)
 		return;
 
 	/* Ignore entries entirely below our minimum. */
@@ -341,8 +347,8 @@ static void process_e820_entry(struct e820entry *entry,
 
 		/* On 32-bit, reduce region size to fit within max size. */
 		if (IS_ENABLED(CONFIG_X86_32) &&
-		    region.start + region.size > KERNEL_IMAGE_SIZE)
-			region.size = KERNEL_IMAGE_SIZE - region.start;
+		    region.start + region.size > _kernel_mapping_size)
+			region.size = _kernel_mapping_size - region.start;
 
 		/* Return if region can't contain decompressed kernel */
 		if (region.size < image_size)
@@ -408,9 +414,9 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
 	/*
 	 * There are how many CONFIG_PHYSICAL_ALIGN-sized slots
 	 * that can hold image_size within the range of minimum to
-	 * KERNEL_IMAGE_SIZE?
+	 * _kernel_mapping_size?
 	 */
-	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
+	slots = (_kernel_mapping_size - minimum - image_size) /
 		 CONFIG_PHYSICAL_ALIGN + 1;
 
 	random_addr = kaslr_get_random_long("Virtual") % slots;
@@ -438,6 +444,10 @@ void choose_random_location(unsigned long input,
 		return;
 	}
 
+#ifdef CONFIG_X86_64
+	_kernel_mapping_size = KERNEL_MAPPING_SIZE;
+#endif
+
 	boot_params->hdr.loadflags |= KASLR_FLAG;
 
 	/* Prepare to add new identity pagetables on demand. */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 62a20ea..20a5a9b 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -49,18 +49,25 @@
 #define __PHYSICAL_MASK_SHIFT	46
 #define __VIRTUAL_MASK_SHIFT	47
 
+
+/*
+ * Kernel image size is limited to 512 MB. The kernel code+data+bss
+ * must not be bigger than that.
+ */
+#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
+
 /*
- * Kernel image size is limited to 1GiB due to the fixmap living in the
- * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use
- * 512MiB by default, leaving 1.5GiB for modules once the page tables
+ * Kernel mapping size is limited to 1GiB due to the fixmap living in
+ * the next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S).
+ * Use 512MiB by default, leaving 1.5GiB for modules once the page tables
  * are fully set up. If kernel ASLR is configured, it can extend the
  * kernel page table mapping, reducing the size of the modules area.
  */
 #define KERNEL_MAPPING_SIZE_EXT	(1024 * 1024 * 1024)
 #if defined(CONFIG_RANDOMIZE_BASE)
-#define KERNEL_IMAGE_SIZE	KERNEL_MAPPING_SIZE_EXT
+#define KERNEL_MAPPING_SIZE	KERNEL_MAPPING_SIZE_EXT
 #else
-#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
+#define KERNEL_MAPPING_SIZE	KERNEL_IMAGE_SIZE
 #endif
 
 #endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 3a26420..a357050 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -66,7 +66,7 @@ typedef struct { pteval_t pte; } pte_t;
 #define VMEMMAP_START	__VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
 #define VMALLOC_END	(VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
-#define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+#define MODULES_VADDR    (__START_KERNEL_map + KERNEL_MAPPING_SIZE)
 #define MODULES_END      _AC(0xffffffffff000000, UL)
 #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 03bcb67..7446055 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -466,7 +466,8 @@ NEXT_PAGE(level2_kernel_pgt)
 	 * 512M if no kaslr, 1G if kaslr enabled. Later cleanup_highmap will
 	 * clean up those unused entries.
 	 *
-	 * The module area starts after kernel mapping area.
+	 * The module area starts after the kernel mapping area; see MODULES_VADDR.
+	 * Its start varies with KERNEL_MAPPING_SIZE.
 	 */
 	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
 		PTRS_PER_PMD)
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index cfc3b91..c0b70fc 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -18,7 +18,7 @@ unsigned long __phys_addr(unsigned long x)
 	if (unlikely(x > y)) {
 		x = y + phys_base;
 
-		VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
+		VIRTUAL_BUG_ON(y >= KERNEL_MAPPING_SIZE);
 	} else {
 		x = y + (__START_KERNEL_map - PAGE_OFFSET);
 
@@ -35,7 +35,7 @@ unsigned long __phys_addr_symbol(unsigned long x)
 	unsigned long y = x - __START_KERNEL_map;
 
 	/* only check upper bounds since lower bounds will trigger carry */
-	VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
+	VIRTUAL_BUG_ON(y >= KERNEL_MAPPING_SIZE);
 
 	return y + phys_base;
 }
@@ -50,7 +50,7 @@ bool __virt_addr_valid(unsigned long x)
 	if (unlikely(x > y)) {
 		x = y + phys_base;
 
-		if (y >= KERNEL_IMAGE_SIZE)
+		if (y >= KERNEL_MAPPING_SIZE)
 			return false;
 	} else {
 		x = y + (__START_KERNEL_map - PAGE_OFFSET);
-- 
2.5.5
