Message-Id: <20080119064831.6AA5614EAD@wotan.suse.de>
Date: Sat, 19 Jan 2008 07:48:31 +0100 (CET)
From: Andi Kleen <ak@...e.de>
To: mingo@...e.hu, tglx@...utronix.de, linux-kernel@...r.kernel.org
Subject: [PATCH] [3/8] GBPAGES: Split LARGE_PAGE_SIZE/MASK into PUD_PAGE_SIZE/PMD_PAGE_SIZE
Split the existing LARGE_PAGE_SIZE/LARGE_PAGE_MASK macros into two new pairs:
PMD_PAGE_SIZE/PMD_PAGE_MASK for the 2MB level and PUD_PAGE_SIZE/PUD_PAGE_MASK
for the 1GB level.
Fix up all callers to use the new names; 32bit keeps LARGE_PAGE_* as aliases
for now.
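For illustration, here is a standalone userspace sketch (not part of the
patch; it hardcodes the usual x86-64 shifts, PMD_SHIFT=21 and PUD_SHIFT=30)
of what the new macros evaluate to, plus the round-up idiom the head_64.S
hunks below rely on:

/* demo.c -- standalone sketch, not kernel code */
#include <stdio.h>

#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PMD_PAGE_SIZE	(1UL << PMD_SHIFT)	/* 2MB */
#define PMD_PAGE_MASK	(~(PMD_PAGE_SIZE - 1))
#define PUD_PAGE_SIZE	(1UL << PUD_SHIFT)	/* 1GB */
#define PUD_PAGE_MASK	(~(PUD_PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x1234567UL;
	/* add-then-mask rounds up to the next 2MB boundary */
	unsigned long up = (addr + PMD_PAGE_SIZE - 1) & PMD_PAGE_MASK;

	printf("PMD_PAGE_SIZE: %luMB\n", PMD_PAGE_SIZE >> 20);	/* 2 */
	printf("PUD_PAGE_SIZE: %luGB\n", PUD_PAGE_SIZE >> 30);	/* 1 */
	printf("0x%lx rounds up to 0x%lx\n", addr, up);	/* 0x1400000 */
	return 0;
}

Masking with PMD_PAGE_MASK alone rounds down; adding PMD_PAGE_SIZE-1 first
rounds up, which is what the relocatable-kernel alignment code needs.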
Signed-off-by: Andi Kleen <ak@...e.de>
---
arch/x86/boot/compressed/head_64.S | 8 ++++----
arch/x86/kernel/head_64.S | 4 ++--
arch/x86/kernel/pci-gart_64.c | 2 +-
arch/x86/mm/init_64.c | 6 +++---
arch/x86/mm/pageattr_64.c | 4 ++--
include/asm-x86/page.h | 4 ++--
include/asm-x86/page_32.h | 4 ++++
include/asm-x86/page_64.h | 3 +++
8 files changed, 21 insertions(+), 14 deletions(-)
Index: linux/include/asm-x86/page_64.h
===================================================================
--- linux.orig/include/asm-x86/page_64.h
+++ linux/include/asm-x86/page_64.h
@@ -23,6 +23,9 @@
 #define MCE_STACK 5
 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
 
+#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+
 #define __PAGE_OFFSET _AC(0xffff810000000000, UL)
 
 #define __PHYSICAL_START CONFIG_PHYSICAL_START
Index: linux/arch/x86/boot/compressed/head_64.S
===================================================================
--- linux.orig/arch/x86/boot/compressed/head_64.S
+++ linux/arch/x86/boot/compressed/head_64.S
@@ -80,8 +80,8 @@ startup_32:
 
 #ifdef CONFIG_RELOCATABLE
 	movl	%ebp, %ebx
-	addl	$(LARGE_PAGE_SIZE -1), %ebx
-	andl	$LARGE_PAGE_MASK, %ebx
+	addl	$(PMD_PAGE_SIZE -1), %ebx
+	andl	$PMD_PAGE_MASK, %ebx
 #else
 	movl	$CONFIG_PHYSICAL_START, %ebx
 #endif
@@ -220,8 +220,8 @@ ENTRY(startup_64)
 	/* Start with the delta to where the kernel will run at. */
 #ifdef CONFIG_RELOCATABLE
 	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
-	addq	$(LARGE_PAGE_SIZE - 1), %rbp
-	andq	$LARGE_PAGE_MASK, %rbp
+	addq	$(PMD_PAGE_SIZE - 1), %rbp
+	andq	$PMD_PAGE_MASK, %rbp
 	movq	%rbp, %rbx
 #else
 	movq	$CONFIG_PHYSICAL_START, %rbp
Index: linux/arch/x86/kernel/pci-gart_64.c
===================================================================
--- linux.orig/arch/x86/kernel/pci-gart_64.c
+++ linux/arch/x86/kernel/pci-gart_64.c
@@ -501,7 +501,7 @@ static __init unsigned long check_iommu_
 	}
 
 	a = aper + iommu_size;
-	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;
+	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
 
 	if (iommu_size < 64*1024*1024) {
 		printk(KERN_WARNING
Index: linux/arch/x86/kernel/head_64.S
===================================================================
--- linux.orig/arch/x86/kernel/head_64.S
+++ linux/arch/x86/kernel/head_64.S
@@ -63,7 +63,7 @@ startup_64:
 
 	/* Is the address not 2M aligned? */
 	movq	%rbp, %rax
-	andl	$~LARGE_PAGE_MASK, %eax
+	andl	$~PMD_PAGE_MASK, %eax
 	testl	%eax, %eax
 	jnz	bad_address
 
@@ -88,7 +88,7 @@ startup_64:
 
 	/* Add an Identity mapping if I am above 1G */
 	leaq	_text(%rip), %rdi
-	andq	$LARGE_PAGE_MASK, %rdi
+	andq	$PMD_PAGE_MASK, %rdi
 
 	movq	%rdi, %rax
 	shrq	$PUD_SHIFT, %rax
Index: linux/arch/x86/mm/init_64.c
===================================================================
--- linux.orig/arch/x86/mm/init_64.c
+++ linux/arch/x86/mm/init_64.c
@@ -420,10 +420,10 @@ __clear_kernel_mapping(unsigned long add
 {
 	unsigned long end = address + size;
 
-	BUG_ON(address & ~LARGE_PAGE_MASK);
-	BUG_ON(size & ~LARGE_PAGE_MASK);
+	BUG_ON(address & ~PMD_PAGE_MASK);
+	BUG_ON(size & ~PMD_PAGE_MASK);
 
-	for (; address < end; address += LARGE_PAGE_SIZE) {
+	for (; address < end; address += PMD_PAGE_SIZE) {
 		pgd_t *pgd = pgd_offset_k(address);
 		pud_t *pud;
 		pmd_t *pmd;
Index: linux/arch/x86/mm/pageattr_64.c
===================================================================
--- linux.orig/arch/x86/mm/pageattr_64.c
+++ linux/arch/x86/mm/pageattr_64.c
@@ -70,7 +70,7 @@ static struct page *split_large_page(uns
 	page_private(base) = 0;
 
 	address = __pa(address);
-	addr = address & LARGE_PAGE_MASK;
+	addr = address & PMD_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
 		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
@@ -150,7 +150,7 @@ static void revert_page(unsigned long ad
 	BUG_ON(pud_none(*pud));
 	pmd = pmd_offset(pud, address);
 	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
+	pfn = (__pa(address) & PMD_PAGE_MASK) >> PAGE_SHIFT;
 	large_pte = pfn_pte(pfn, ref_prot);
 	large_pte = pte_mkhuge(large_pte);
 	set_pte((pte_t *)pmd, large_pte);
Index: linux/include/asm-x86/page_32.h
===================================================================
--- linux.orig/include/asm-x86/page_32.h
+++ linux/include/asm-x86/page_32.h
@@ -13,6 +13,10 @@
  */
 #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
 
+/* Eventually 32bit should be moved over to the new names too */
+#define LARGE_PAGE_SIZE PMD_PAGE_SIZE
+#define LARGE_PAGE_MASK PMD_PAGE_MASK
+
 #ifdef CONFIG_X86_PAE
 #define __PHYSICAL_MASK_SHIFT 36
 #define __VIRTUAL_MASK_SHIFT 32
Index: linux/include/asm-x86/page.h
===================================================================
--- linux.orig/include/asm-x86/page.h
+++ linux/include/asm-x86/page.h
@@ -13,8 +13,8 @@
 #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
 #define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK))
 
-#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
 
 #define HPAGE_SHIFT PMD_SHIFT
 #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
--