Message-Id: <20080201095324.5F1FF1B416F@basil.firstfloor.org>
Date:	Fri,  1 Feb 2008 10:53:24 +0100 (CET)
From:	Andi Kleen <ak@...e.de>
To:	tglx@...utronix.de, mingo@...e.hu, linux-kernel@...r.kernel.org
Subject: [PATCH] [11/12] GBPAGES: Do kernel direct mapping at boot using GB pages


The AMD Fam10h CPUs support a new gigabyte page table entry for
mapping 1GB at a time. Use this for the kernel direct mapping.

Only done for 64-bit because i386 does not support GB page tables.
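
For reference, cpu_has_gbpages corresponds to the AMD pdpe1gb feature bit
(CPUID leaf 0x80000001, EDX bit 26). A minimal user-space sketch to check
it, assuming a GCC/clang toolchain with <cpuid.h> (not part of this patch):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Extended leaf 0x80000001, EDX bit 26 = pdpe1gb (1GB pages) */
		if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
			return 1;
		printf("1GB pages %ssupported\n",
		       (edx & (1 << 26)) ? "" : "not ");
		return 0;
	}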

This only applies to the data portion of the direct mapping; the
kernel text mapping stays with 2MB pages because the AMD Fam10h
microarchitecture does not support GB ITLBs and AMD recommends 
against using GB mappings for code.

GB pages are disabled when DEBUG_PAGEALLOC is enabled, because
DEBUG_PAGEALLOC causes recursion in cpa() and with GB pages
the potential maximum recursion depth is much deeper.

GB pages can be disabled with direct_gbpages=off.
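
The direct_gbpages variable and the GBP_* states used below are presumably
introduced elsewhere in this series, together with the command line parsing.
Purely as an illustrative sketch (not part of this patch), an early_param
handler for direct_gbpages could look roughly like this:

	/* Illustrative only -- the real parsing lives in another patch. */
	static int __init parse_direct_gbpages(char *arg)
	{
		if (arg && !strcmp(arg, "off"))
			direct_gbpages = GBP_OFF;
		else if (arg && !strcmp(arg, "on"))
			direct_gbpages = GBP_ON;
		return 0;
	}
	early_param("direct_gbpages", parse_direct_gbpages);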

Signed-off-by: Andi Kleen <ak@...e.de>

---
 arch/x86/mm/init_64.c |   35 ++++++++++++++++++++++++++++++-----
 1 file changed, 30 insertions(+), 5 deletions(-)

Index: linux/arch/x86/mm/init_64.c
===================================================================
--- linux.orig/arch/x86/mm/init_64.c
+++ linux/arch/x86/mm/init_64.c
@@ -339,7 +339,14 @@ phys_pud_init(pud_t *pud_page, unsigned 
 		}
 
 		if (pud_val(*pud)) {
-			phys_pmd_update(pud, addr, end);
+			if (!pud_large(*pud))
+				phys_pmd_update(pud, addr, end);
+			continue;
+		}
+
+		if (direct_gbpages == GBP_ON) {
+			set_pte((pte_t *)pud,
+				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 			continue;
 		}
 
@@ -360,9 +367,11 @@ static void __init find_early_table_spac
 	unsigned long puds, pmds, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
-		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
+	if (direct_gbpages == GBP_ON) {
+		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+		tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+	}
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
@@ -389,6 +398,20 @@ static void __init find_early_table_spac
 		(table_start << PAGE_SHIFT) + tables);
 }
 
+static void __init init_gbpages(void)
+{
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/* debug pagealloc causes too much recursion with gbpages */
+	if (direct_gbpages != GBP_DEFAULT)
+		return;
+#endif
+	if (direct_gbpages != GBP_OFF && cpu_has_gbpages) {
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+		direct_gbpages = GBP_ON;
+	} else
+		direct_gbpages = GBP_OFF;
+}
+
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
@@ -407,8 +430,10 @@ void __init_refok init_memory_mapping(un
 	 * memory mapped. Unfortunately this is done currently before the
 	 * nodes are discovered.
 	 */
-	if (!after_bootmem)
+	if (!after_bootmem) {
+		init_gbpages();
 		find_early_table_space(end);
+	}
 
 	start = (unsigned long)__va(start);
 	end = (unsigned long)__va(end);
--
