Date:   Thu, 18 Nov 2021 16:18:03 +0800
From:   Huangzhaoyang <huangzhaoyang@...il.com>
To:     Ard Biesheuvel <ardb@...nel.org>,
        Catalin Marinas <catalin.marinas@....com>,
        Will Deacon <will@...nel.org>,
        Anshuman Khandual <anshuman.khandual@....com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Nicholas Piggin <npiggin@...il.com>,
        Mike Rapoport <rppt@...nel.org>,
        Pavel Tatashin <pasha.tatashin@...een.com>,
        Christophe Leroy <christophe.leroy@...roup.eu>,
        Jonathan Marek <jonathan@...ek.ca>,
        Zhaoyang Huang <zhaoyang.huang@...soc.com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Subject: [RFC PATCH] arch: arm64: try to use PTE_CONT when changing page attributes

From: Zhaoyang Huang <zhaoyang.huang@...soc.com>

When rodata_full is enabled, the kernel maps the linear map at the minimum
(PAGE_SIZE) granularity, which raises TLB pressure, and PTE_CONT is never
applied. Try to improve this a little by applying PTE_CONT when changing
page attributes: when the affected range fully covers a CONT_PTE- or
CONT_PMD-sized, aligned block, mark the pages of that block with the
contiguous hint so they can share a single TLB entry.
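
For illustration only (not part of the change itself), the head/body/tail
split this aims at can be sketched in userspace C. The PAGE_SIZE and
CONT_PTES values below are assumptions for a typical arm64 4K-granule
configuration (16 pages per contiguous PTE run), and the rounding here is
a simplified variant of the arithmetic in the hunk below:

#include <stdio.h>

/* Assumed values; the kernel derives these from the page-table geometry. */
#define PAGE_SIZE	0x1000UL
#define CONT_PTES	16UL
#define CONT_PTE_SIZE	(CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x41f000UL;		/* page-aligned start */
	unsigned long end = start + 70 * PAGE_SIZE;	/* exclusive end */

	/* Round start up and end down to CONT_PTE_SIZE boundaries. */
	unsigned long cont_start = (start + CONT_PTE_SIZE - 1) & CONT_PTE_MASK;
	unsigned long cont_end = end & CONT_PTE_MASK;

	if (cont_start < cont_end) {
		printf("head (per page, no PTE_CONT): [%#lx, %#lx)\n", start, cont_start);
		printf("body (apply PTE_CONT):        [%#lx, %#lx)\n", cont_start, cont_end);
		printf("tail (per page, no PTE_CONT): [%#lx, %#lx)\n", cont_end, end);
	} else {
		printf("no fully covered block, per-page only: [%#lx, %#lx)\n", start, end);
	}
	return 0;
}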

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@...soc.com>
---
 arch/arm64/mm/pageattr.c | 62 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 58 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index a3bacd7..0b6a354 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -61,8 +61,13 @@ static int change_memory_common(unsigned long addr, int numpages,
 	unsigned long start = addr;
 	unsigned long size = PAGE_SIZE * numpages;
 	unsigned long end = start + size;
+	unsigned long cont_pte_start = 0;
+	unsigned long cont_pte_end = 0;
+	unsigned long cont_pmd_start = 0;
+	unsigned long cont_pmd_end = 0;
+	pgprot_t orig_set_mask = set_mask;
 	struct vm_struct *area;
-	int i;
+	int i = 0;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -98,9 +103,58 @@ static int change_memory_common(unsigned long addr, int numpages,
 	 */
 	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
 			    pgprot_val(clear_mask) == PTE_RDONLY)) {
-		for (i = 0; i < area->nr_pages; i++) {
-			__change_memory_common((u64)page_address(area->pages[i]),
-					       PAGE_SIZE, set_mask, clear_mask);
+		cont_pmd_start = (start + ~CONT_PMD_MASK + 1) & CONT_PMD_MASK;
+		cont_pmd_end = cont_pmd_start + ~CONT_PMD_MASK + 1;
+		cont_pte_start = (start + ~CONT_PTE_MASK + 1) & CONT_PTE_MASK;
+		cont_pte_end = cont_pte_start + ~CONT_PTE_MASK + 1;
+
+		if (addr <= cont_pmd_start && end > cont_pmd_end) {
+			do {
+				__change_memory_common((u64)page_address(area->pages[i]),
+						PAGE_SIZE, set_mask, clear_mask);
+				i++;
+				addr += PAGE_SIZE;
+			} while (addr < cont_pmd_start);
+			do {
+				set_mask = __pgprot(pgprot_val(set_mask) | PTE_CONT);
+				__change_memory_common((u64)page_address(area->pages[i]),
+						PAGE_SIZE, set_mask, clear_mask);
+				i++;
+				addr += PAGE_SIZE;
+			} while (addr < cont_pmd_end);
+			set_mask = orig_set_mask;
+			do {
+				__change_memory_common((u64)page_address(area->pages[i]),
+						PAGE_SIZE, set_mask, clear_mask);
+				i++;
+				addr += PAGE_SIZE;
+			} while (addr < end);
+		} else if (addr <= cont_pte_start && end > cont_pte_end) {
+			do {
+				__change_memory_common((u64)page_address(area->pages[i]),
+						PAGE_SIZE, set_mask, clear_mask);
+				i++;
+				addr += PAGE_SIZE;
+			} while (addr < cont_pte_start);
+			do {
+				set_mask = __pgprot(pgprot_val(set_mask) | PTE_CONT);
+				__change_memory_common((u64)page_address(area->pages[i]),
+						PAGE_SIZE, set_mask, clear_mask);
+				i++;
+				addr += PAGE_SIZE;
+			} while (addr < cont_pte_end);
+			set_mask = orig_set_mask;
+			do {
+				__change_memory_common((u64)page_address(area->pages[i]),
+						PAGE_SIZE, set_mask, clear_mask);
+				i++;
+				addr += PAGE_SIZE;
+			} while (addr < end);
+		} else {
+			for (i = 0; i < area->nr_pages; i++) {
+				__change_memory_common((u64)page_address(area->pages[i]),
+						PAGE_SIZE, set_mask, clear_mask);
+			}
 		}
 	}
 
-- 
1.9.1
