Date:	Wed, 16 Jan 2008 23:15:31 +0100 (CET)
From:	Andi Kleen <ak@...e.de>
To:	linux-kernel@...r.kernel.org, mingo@...e.hu, tglx@...utronix.de,
	jbeulich@...ell.com, venkatesh.pallipadi@...el.com
Subject: [PATCH] [31/36] CPA: Fix reference counting when changing already changed pages


When changing a page that has already been modified to non-standard
attributes, don't change the reference count again. And when changing
a page back, only decrease the reference count if the old attributes
were non-standard.
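
In other words, page_private() of the kpte page counts how many PTEs
in it currently deviate from the reference protection. A minimal
sketch of the counting rule (names as in the diff below; this is an
illustration, not the literal kernel code):

	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		/* Changing away: count only the first transition. */
		if (pgprot_val(oldprot) == pgprot_val(ref_prot))
			page_private(kpte_page)++;
	} else {
		/* Changing back: uncount only if the page really deviated. */
		if (pgprot_val(oldprot) != pgprot_val(ref_prot))
			page_private(kpte_page)--;
	}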

Signed-off-by: Andi Kleen <ak@...e.de>
Acked-by: Jan Beulich <jbeulich@...ell.com>

---
 arch/x86/mm/pageattr_32.c |   44 +++++++++++++++++++++++++-------------------
 arch/x86/mm/pageattr_64.c |   16 ++++++++++++----
 include/asm-x86/pgtable.h |    2 ++
 3 files changed, 39 insertions(+), 23 deletions(-)
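
Note that the protection comparisons only become meaningful once both
values are canonicalized against __supported_pte_mask (so that, e.g.,
a _PAGE_NX bit in the reference protection cannot defeat the
comparison on CPUs without NX support). That is what the new
canon_pgprot() helper added to pgtable.h below does:

	#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)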

Index: linux/arch/x86/mm/pageattr_64.c
===================================================================
--- linux.orig/arch/x86/mm/pageattr_64.c
+++ linux/arch/x86/mm/pageattr_64.c
@@ -206,20 +206,26 @@ __change_page_attr(unsigned long address
 { 
 	pte_t *kpte; 
 	struct page *kpte_page;
-	pgprot_t ref_prot2;
+	pgprot_t ref_prot2, oldprot;
 	int level;
 
 	kpte = lookup_address(address, &level);
 	if (!kpte) return 0;
 	kpte_page = virt_to_page(kpte);
+	oldprot = pte_pgprot(*kpte);
 	BUG_ON(PageCompound(kpte_page));
 	BUG_ON(PageLRU(kpte_page));
 
 	set_tlb_flush(address, cache_attr_changed(*kpte, prot, level),
 			level < 4);
 
+	ref_prot = canon_pgprot(ref_prot);
+	prot = canon_pgprot(prot);
+
 	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
 		if (level == 4) {
+			if (pgprot_val(oldprot) == pgprot_val(ref_prot))
+				page_private(kpte_page)++;
 			set_pte(kpte, pfn_pte(pfn, prot));
 		} else {
 			/*
@@ -234,12 +240,14 @@ __change_page_attr(unsigned long address
 			pgprot_val(ref_prot2) &= ~_PAGE_NX;
 			set_pte(kpte, mk_pte(split, ref_prot2));
 			kpte_page = split;
+			page_private(kpte_page)++;
 		}
-		page_private(kpte_page)++;
 	} else if (level == 4) {
+		if (pgprot_val(oldprot) != pgprot_val(ref_prot)) {
+			BUG_ON(page_private(kpte_page) <= 0);
+			page_private(kpte_page)--;
+		}
 		set_pte(kpte, pfn_pte(pfn, ref_prot));
-		BUG_ON(page_private(kpte_page) == 0);
-		page_private(kpte_page)--;
 	} else {
 		/*
 		 * When you're here you either set the same page to PAGE_KERNEL
Index: linux/arch/x86/mm/pageattr_32.c
===================================================================
--- linux.orig/arch/x86/mm/pageattr_32.c
+++ linux/arch/x86/mm/pageattr_32.c
@@ -151,20 +151,17 @@ static void set_pmd_pte(pte_t *kpte, uns
  * No more special protections in this 2/4MB area - revert to a
  * large page again. 
  */
-static inline void revert_page(struct page *kpte_page, unsigned long address)
+static void
+revert_page(struct page *kpte_page, unsigned long address, pgprot_t ref_prot)
 {
-	pgprot_t ref_prot;
 	pte_t *linear;
 
-	ref_prot =
-	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
-		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
-
 	linear = (pte_t *)
 		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
 	set_pmd_pte(linear,  address,
-		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
-			    ref_prot));
+		    pte_mkhuge(pfn_pte((__pa(address) & LARGE_PAGE_MASK)
+					>> PAGE_SHIFT,
+			    ref_prot)));
 }
 
 static inline void save_page(struct page *kpte_page)
@@ -223,6 +220,8 @@ __change_page_attr(struct page *page, pg
 	unsigned long address;
 	struct page *kpte_page;
 	int level;
+	pgprot_t oldprot;
+	pgprot_t ref_prot = PAGE_KERNEL;
 
 	BUG_ON(PageHighMem(page));
 	address = (unsigned long)page_address(page);
@@ -230,6 +229,8 @@ __change_page_attr(struct page *page, pg
 	kpte = lookup_address(address, &level);
 	if (!kpte)
 		return -EINVAL;
+
+	oldprot = pte_pgprot(*kpte);
 	kpte_page = virt_to_page(kpte);
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
@@ -237,27 +238,32 @@ __change_page_attr(struct page *page, pg
 	set_tlb_flush(address, cache_attr_changed(*kpte, prot, level),
 			level < 3);
 
-	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
+	if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+		ref_prot = PAGE_KERNEL_EXEC;
+
+	ref_prot = canon_pgprot(ref_prot);
+	prot = canon_pgprot(prot);
+
+	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
 		if (level == 3) {
+			if (pgprot_val(oldprot) == pgprot_val(ref_prot))
+				page_private(kpte_page)++;
 			set_pte_atomic(kpte, mk_pte(page, prot)); 
 		} else {
-			pgprot_t ref_prot;
 			struct page *split;
-
-			ref_prot =
-			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
-				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
 			split = split_large_page(address, prot, ref_prot);
 			if (!split)
 				return -ENOMEM;
 			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
 			kpte_page = split;
+			page_private(kpte_page)++;
 		}
-		page_private(kpte_page)++;
 	} else if (level == 3) {
-		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
-		BUG_ON(page_private(kpte_page) == 0);
-		page_private(kpte_page)--;
+		if (pgprot_val(oldprot) != pgprot_val(ref_prot)) {
+			BUG_ON(page_private(kpte_page) <= 0);
+			page_private(kpte_page)--;
+		}
+		set_pte_atomic(kpte, mk_pte(page, ref_prot));
 	} else {
 		/*
 		 * When you're here you either set the same page to PAGE_KERNEL
@@ -279,7 +285,7 @@ __change_page_attr(struct page *page, pg
 		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
 			save_page(kpte_page);
 			paravirt_release_pt(page_to_pfn(kpte_page));
-			revert_page(kpte_page, address);
+			revert_page(kpte_page, address, ref_prot);
 		}
 	}
 	return 0;
Index: linux/include/asm-x86/pgtable.h
===================================================================
--- linux.orig/include/asm-x86/pgtable.h
+++ linux/include/asm-x86/pgtable.h
@@ -201,6 +201,8 @@ static inline pte_t pte_modify(pte_t pte
 
 #define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))
 
+#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */
--