Date:   Thu,  8 Dec 2016 19:21:44 +0300
From:   "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To:     Linus Torvalds <torvalds@...ux-foundation.org>,
        Andrew Morton <akpm@...ux-foundation.org>, x86@...nel.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Arnd Bergmann <arnd@...db.de>,
        "H. Peter Anvin" <hpa@...or.com>
Cc:     Andi Kleen <ak@...ux.intel.com>,
        Dave Hansen <dave.hansen@...el.com>,
        Andy Lutomirski <luto@...capital.net>,
        linux-arch@...r.kernel.org, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org,
        "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [RFC, PATCHv1 22/28] x86/espfix: support 5-level paging

XXX: how to test this?
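
One way to exercise this: espfix only matters when the kernel IRETs back to
userspace with a 16-bit stack segment, so a test boils down to putting such a
segment in the LDT and returning to it with SS pointing there. The existing
sigreturn selftest (tools/testing/selftests/x86/sigreturn.c) already builds
signal frames whose SS refers to a 16-bit LDT data segment, which forces the
return path through the espfix stacks, so running it on a kernel booted with
5-level paging should cover this code. Below is a minimal sketch of just the
LDT ingredient (hypothetical test code, not part of this patch):

/*
 * Hypothetical helper, not part of this patch: install a 16-bit data
 * segment in the LDT.  Returning to userspace with SS pointing at such
 * a segment is what sends the IRET through the espfix stacks.
 */
#include <asm/ldt.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 0;
	desc.base_addr = 0;	/* base does not matter for the sketch */
	desc.limit = 0xffff;	/* 64K limit */
	desc.seg_32bit = 0;	/* the important part: a 16-bit segment */
	desc.contents = 0;	/* read/write data */

	/* modify_ldt() has no libc wrapper; func 1 writes an LDT entry */
	if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc))) {
		perror("modify_ldt");
		return 1;
	}

	printf("16-bit LDT data segment at selector 0x%x\n",
	       (unsigned)((desc.entry_number << 3) | 0x7));
	return 0;
}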

Not-yet-Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
 arch/x86/kernel/espfix_64.c | 41 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 04f89caef9c4..f0afa0af4237 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -70,8 +70,15 @@ static DEFINE_MUTEX(espfix_init_mutex);
 #define ESPFIX_MAX_PAGES  DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
 static void *espfix_pages[ESPFIX_MAX_PAGES];
 
-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
+#if CONFIG_PGTABLE_LEVELS == 5
+static __page_aligned_bss p4d_t espfix_pgtable_page[PTRS_PER_P4D]
 	__aligned(PAGE_SIZE);
+#elif CONFIG_PGTABLE_LEVELS == 4
+static __page_aligned_bss pud_t espfix_pgtable_page[PTRS_PER_PUD]
+	__aligned(PAGE_SIZE);
+#else
+#error Unexpected CONFIG_PGTABLE_LEVELS
+#endif
 
 static unsigned int page_random, slot_random;
 
@@ -97,6 +104,8 @@ static inline unsigned long espfix_base_addr(unsigned int cpu)
 #define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
 #define ESPFIX_PMD_CLONES PTRS_PER_PMD
 #define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
+/* XXX: what should it be? */
+#define ESPFIX_P4D_CLONES PTRS_PER_P4D
 
 #define PGTABLE_PROT	  ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
 
@@ -122,10 +131,21 @@ static void init_espfix_random(void)
 void __init init_espfix_bsp(void)
 {
 	pgd_t *pgd_p;
+	p4d_t *p4d;
 
 	/* Install the espfix pud into the kernel page directory */
 	pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
-	pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
+	switch (CONFIG_PGTABLE_LEVELS) {
+	case 4:
+		p4d = p4d_offset(pgd_p, ESPFIX_BASE_ADDR);
+		p4d_populate(&init_mm, p4d, (pud_t *)espfix_pgtable_page);
+		break;
+	case 5:
+		pgd_populate(&init_mm, pgd_p, (p4d_t *)espfix_pgtable_page);
+		break;
+	default:
+		BUILD_BUG();
+	}
 
 	/* Randomize the locations */
 	init_espfix_random();
@@ -138,6 +158,7 @@ void init_espfix_ap(int cpu)
 {
 	unsigned int page;
 	unsigned long addr;
+	p4d_t p4d, *p4d_p;
 	pud_t pud, *pud_p;
 	pmd_t pmd, *pmd_p;
 	pte_t pte, *pte_p;
@@ -167,7 +188,21 @@ void init_espfix_ap(int cpu)
 	node = cpu_to_node(cpu);
 	ptemask = __supported_pte_mask;
 
-	pud_p = &espfix_pud_page[pud_index(addr)];
+	if (CONFIG_PGTABLE_LEVELS == 5) {
+		p4d_p = (p4d_t *)espfix_pgtable_page + p4d_index(addr);
+		if (!p4d_present(*p4d_p)) {
+			struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+			pud_p = (pud_t *)page_address(page);
+			p4d = __p4d(__pa(pud_p) | (PGTABLE_PROT & ptemask));
+			paravirt_alloc_pud(&init_mm, __pa(pud_p) >> PAGE_SHIFT);
+			for (n = 0; n < ESPFIX_P4D_CLONES; n++)
+				set_p4d(&p4d_p[n], p4d);
+		}
+		pud_p = pud_offset(p4d_p, addr);
+	} else {
+		pud_p = (pud_t *)espfix_pgtable_page + pud_index(addr);
+	}
 	pud = *pud_p;
 	if (!pud_present(pud)) {
 		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
-- 
2.10.2
