Message-Id: <20220819041156.873873-4-rppt@kernel.org>
Date:   Fri, 19 Aug 2022 07:11:54 +0300
From:   Mike Rapoport <rppt@...nel.org>
To:     linux-arm-kernel@...ts.infradead.org
Cc:     Ard Biesheuvel <ardb@...nel.org>,
        Catalin Marinas <catalin.marinas@....com>,
        Guanghui Feng <guanghuifeng@...ux.alibaba.com>,
        Mark Rutland <mark.rutland@....com>,
        Mike Rapoport <rppt@...nel.org>,
        Mike Rapoport <rppt@...ux.ibm.com>,
        Will Deacon <will@...nel.org>, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org
Subject: [PATCH 3/5] arm64/mmu: move helpers for hotplug page table freeing close to callers

From: Mike Rapoport <rppt@...ux.ibm.com>

to minimize extra ifdefery when the unmap_*() methods are later used to
remap the crash kernel.

Signed-off-by: Mike Rapoport <rppt@...ux.ibm.com>
---
 arch/arm64/mm/mmu.c | 50 ++++++++++++++++++++++-----------------------
 1 file changed, 25 insertions(+), 25 deletions(-)
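
Aside, not part of the patch: a standalone sketch of the check that
pgtable_range_aligned() performs, mirroring the floor/ceiling convention
of the generic free_pgd_range(). It returns true only when the
mask-aligned region enclosing [start, end) still lies within the
floor/ceiling bounds, so the containing page-table page may be freed;
ceiling == 0 means "no upper bound" (via deliberate unsigned wraparound
in the final comparison). The helper body is copied from this patch; the
2 MiB mask and sample addresses below are illustrative assumptions only.

#include <stdbool.h>
#include <stdio.h>

/* Copy of the helper from this patch, for standalone experimentation. */
static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

int main(void)
{
	/* 2 MiB granularity, i.e. PMD_MASK with 4K pages on arm64. */
	unsigned long mask = ~(0x200000UL - 1);

	/* Whole aligned 2 MiB range, no floor/ceiling limit: prints 1. */
	printf("%d\n", pgtable_range_aligned(0x40000000UL, 0x40200000UL,
					     0, 0, mask));

	/* Aligned-down start falls below the floor: prints 0. */
	printf("%d\n", pgtable_range_aligned(0x40100000UL, 0x40200000UL,
					     0x40080000UL, 0, mask));
	return 0;
}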

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ea81e40a25cd..92267e5e9b5f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -887,30 +887,6 @@ static void free_hotplug_page_range(struct page *page, size_t size,
 	}
 }
 
-static void free_hotplug_pgtable_page(struct page *page)
-{
-	free_hotplug_page_range(page, PAGE_SIZE, NULL);
-}
-
-static bool pgtable_range_aligned(unsigned long start, unsigned long end,
-				  unsigned long floor, unsigned long ceiling,
-				  unsigned long mask)
-{
-	start &= mask;
-	if (start < floor)
-		return false;
-
-	if (ceiling) {
-		ceiling &= mask;
-		if (!ceiling)
-			return false;
-	}
-
-	if (end - 1 > ceiling - 1)
-		return false;
-	return true;
-}
-
 static void unmap_pte_range(pmd_t *pmdp, unsigned long addr,
 				    unsigned long end, bool free_mapped,
 				    struct vmem_altmap *altmap)
@@ -1043,6 +1019,30 @@ static void unmap_range(unsigned long addr, unsigned long end,
 	} while (addr = next, addr < end);
 }
 
+static bool pgtable_range_aligned(unsigned long start, unsigned long end,
+				  unsigned long floor, unsigned long ceiling,
+				  unsigned long mask)
+{
+	start &= mask;
+	if (start < floor)
+		return false;
+
+	if (ceiling) {
+		ceiling &= mask;
+		if (!ceiling)
+			return false;
+	}
+
+	if (end - 1 > ceiling - 1)
+		return false;
+	return true;
+}
+
+static void free_hotplug_pgtable_page(struct page *page)
+{
+	free_hotplug_page_range(page, PAGE_SIZE, NULL);
+}
+
 static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
 				 unsigned long end, unsigned long floor,
 				 unsigned long ceiling)
@@ -1196,7 +1196,7 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
 	} while (addr = next, addr < end);
 }
-#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
 #if !ARM64_KERNEL_USES_PMD_MAPS
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
-- 
2.35.3
