Date:   Tue, 16 Jan 2018 17:36:51 +0100
From:   Joerg Roedel <joro@...tes.org>
To:     Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...nel.org>,
        "H . Peter Anvin" <hpa@...or.com>
Cc:     x86@...nel.org, linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Andy Lutomirski <luto@...nel.org>,
        Dave Hansen <dave.hansen@...el.com>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Juergen Gross <jgross@...e.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Borislav Petkov <bp@...en8.de>, Jiri Kosina <jkosina@...e.cz>,
        Boris Ostrovsky <boris.ostrovsky@...cle.com>,
        Brian Gerst <brgerst@...il.com>,
        David Laight <David.Laight@...lab.com>,
        Denys Vlasenko <dvlasenk@...hat.com>,
        Eduardo Valentin <eduval@...zon.com>,
        Greg KH <gregkh@...uxfoundation.org>,
        Will Deacon <will.deacon@....com>, aliguori@...zon.com,
        daniel.gruss@...k.tugraz.at, hughd@...gle.com, keescook@...gle.com,
        Andrea Arcangeli <aarcange@...hat.com>,
        Waiman Long <llong@...hat.com>, jroedel@...e.de,
        joro@...tes.org
Subject: [PATCH 08/16] x86/pgtable/32: Allocate 8k page-tables when PTI is enabled

From: Joerg Roedel <jroedel@...e.de>

Allocate a kernel and a user page-table root when PTI is
enabled. Also allocate a full page per root for PAE because
otherwise the bit to flip in CR3 to switch between them
would be non-constant, which creates a lot of hassle.
Keep that for a later optimization.

Signed-off-by: Joerg Roedel <jroedel@...e.de>
---
 arch/x86/kernel/head_32.S | 23 ++++++++++++++++++-----
 arch/x86/mm/pgtable.c     | 11 ++++++-----
 2 files changed, 24 insertions(+), 10 deletions(-)
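
For illustration of the CR3 argument in the changelog: with both
page-table roots in one 8k-aligned block, the user root sits exactly
one page above the kernel root, so switching between them is a flip
of a single compile-time-constant bit (bit 12) of the CR3 value. A
minimal user-space sketch of that arithmetic -- the addresses and the
local PTI_SWITCH_MASK define are made up here, not taken from the
kernel sources:

  #include <stdio.h>

  #define PAGE_SHIFT	12
  /* Stand-in switch mask: flipping bit 12 of CR3 selects the other root. */
  #define PTI_SWITCH_MASK	(1u << PAGE_SHIFT)

  int main(void)
  {
  	/* Hypothetical physical address of an 8k-aligned PGD pair. */
  	unsigned int kernel_cr3 = 0x01000000;                   /* kernel root */
  	unsigned int user_cr3   = kernel_cr3 ^ PTI_SWITCH_MASK; /* + 4k page   */

  	printf("kernel CR3: %#010x\n", kernel_cr3);
  	printf("user   CR3: %#010x\n", user_cr3);
  	printf("and back:   %#010x\n", user_cr3 ^ PTI_SWITCH_MASK);
  	return 0;
  }

The pgtable.c side keeps this property for runtime-allocated roots:
__get_free_pages() with order PGD_ALLOCATION_ORDER (1 when PTI is
enabled) returns an 8k block, and the buddy allocator aligns an
order-1 allocation to 8k by construction.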

diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c29020907886..fc550559bf58 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -512,28 +512,41 @@ ENTRY(initial_code)
 ENTRY(setup_once_ref)
 	.long setup_once
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define	PGD_ALIGN	(2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL	1024
+#else
+#define	PGD_ALIGN	(PAGE_SIZE)
+#define PTI_USER_PGD_FILL	0
+#endif
 /*
  * BSS section
  */
 __PAGE_ALIGNED_BSS
-	.align PAGE_SIZE
+	.align PGD_ALIGN
 #ifdef CONFIG_X86_PAE
 .globl initial_pg_pmd
 initial_pg_pmd:
 	.fill 1024*KPMDS,4,0
+	.fill PTI_USER_PGD_FILL,4,0
 #else
 .globl initial_page_table
 initial_page_table:
 	.fill 1024,4,0
+	.fill PTI_USER_PGD_FILL,4,0
 #endif
+	.align PGD_ALIGN
 initial_pg_fixmap:
 	.fill 1024,4,0
-.globl empty_zero_page
-empty_zero_page:
-	.fill 4096,1,0
-EXPORT_SYMBOL(empty_zero_page)
+	.fill PTI_USER_PGD_FILL,4,0
 .globl swapper_pg_dir
+	.align PGD_ALIGN
 swapper_pg_dir:
 	.fill 1024,4,0
+	.fill PTI_USER_PGD_FILL,4,0
+.globl empty_zero_page
+empty_zero_page:
+	.fill 4096,1,0
+EXPORT_SYMBOL(empty_zero_page)
 
 /*
@@ -542,7 +555,7 @@ EXPORT_SYMBOL(empty_zero_page)
 #ifdef CONFIG_X86_PAE
 __PAGE_ALIGNED_DATA
 	/* Page-aligned for the benefit of paravirt? */
-	.align PAGE_SIZE
+	.align PGD_ALIGN
 ENTRY(initial_page_table)
 	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
 # if KPMDS == 3
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 004abf9ebf12..48abefd95924 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -313,7 +313,7 @@ static int __init pgd_cache_init(void)
 	 * When PAE kernel is running as a Xen domain, it does not use
 	 * shared kernel pmd. And this requires a whole page for pgd.
 	 */
-	if (!SHARED_KERNEL_PMD)
+	if (static_cpu_has(X86_FEATURE_PTI) || !SHARED_KERNEL_PMD)
 		return 0;
 
 	/*
@@ -337,8 +337,9 @@ static inline pgd_t *_pgd_alloc(void)
 	 * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
 	 * We allocate one page for pgd.
 	 */
-	if (!SHARED_KERNEL_PMD)
-		return (pgd_t *)__get_free_page(PGALLOC_GFP);
+	if (static_cpu_has(X86_FEATURE_PTI) || !SHARED_KERNEL_PMD)
+		return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+						 PGD_ALLOCATION_ORDER);
 
 	/*
 	 * Now PAE kernel is not running as a Xen domain. We can allocate
@@ -349,8 +350,8 @@ static inline pgd_t *_pgd_alloc(void)
 
 static inline void _pgd_free(pgd_t *pgd)
 {
-	if (!SHARED_KERNEL_PMD)
-		free_page((unsigned long)pgd);
+	if (static_cpu_has(X86_FEATURE_PTI) || !SHARED_KERNEL_PMD)
+		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
 	else
 		kmem_cache_free(pgd_cache, pgd);
 }
-- 
2.13.6
