lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20260205013527.322157-3-vishal.moola@gmail.com>
Date: Wed,  4 Feb 2026 17:35:25 -0800
From: "Vishal Moola (Oracle)" <vishal.moola@...il.com>
To: linux-kernel@...r.kernel.org,
	linux-mm@...ck.org,
	x86@...nel.org,
	"Mike Rapoport (Microsoft)" <rppt@...nel.org>,
	Dave Hansen <dave.hansen@...ux.intel.com>
Cc: akpm@...ux-foundation.org,
	"Matthew Wilcox (Oracle)" <willy@...radead.org>,
	Andy Lutomirski <luto@...nel.org>,
	Peter Zijlstra <peterz@...radead.org>,
	"Vishal Moola (Oracle)" <vishal.moola@...il.com>
Subject: [PATCH v4 2/4] x86/mm/pat: Convert pte code to use ptdescs

We need all allocation and free sites to use the ptdesc APIs in order to
allocate page tables separately from regular pages. Convert these pte
allocation/free sites to use ptdescs.

Also, rename *_pte_page() functions to *_pte(). Rename them now to avoid
any confusion later. Eventually these allocations will be backed by a
ptdesc rather than a page, but that distinction is not important to
callers either way.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
---
 arch/x86/mm/pat/set_memory.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 6c6eb486f7a6..c6c68fbbb046 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1400,7 +1400,7 @@ static int collapse_large_pages(unsigned long addr, struct list_head *pgtables)
 	return collapsed;
 }
 
-static bool try_to_free_pte_page(pte_t *pte)
+static bool try_to_free_pte(pte_t *pte)
 {
 	int i;
 
@@ -1408,7 +1408,7 @@ static bool try_to_free_pte_page(pte_t *pte)
 		if (!pte_none(pte[i]))
 			return false;
 
-	free_page((unsigned long)pte);
+	pgtable_free_addr(pte);
 	return true;
 }
 
@@ -1435,7 +1435,7 @@ static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
 		pte++;
 	}
 
-	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
+	if (try_to_free_pte((pte_t *)pmd_page_vaddr(*pmd))) {
 		pmd_clear(pmd);
 		return true;
 	}
@@ -1537,9 +1537,9 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 	 */
 }
 
-static int alloc_pte_page(pmd_t *pmd)
+static int alloc_pte(pmd_t *pmd)
 {
-	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+	pte_t *pte = (pte_t *) pgtable_alloc_addr(GFP_KERNEL, 0);
 	if (!pte)
 		return -1;
 
@@ -1600,7 +1600,7 @@ static long populate_pmd(struct cpa_data *cpa,
 		 */
 		pmd = pmd_offset(pud, start);
 		if (pmd_none(*pmd))
-			if (alloc_pte_page(pmd))
+			if (alloc_pte(pmd))
 				return -1;
 
 		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
@@ -1641,7 +1641,7 @@ static long populate_pmd(struct cpa_data *cpa,
 	if (start < end) {
 		pmd = pmd_offset(pud, start);
 		if (pmd_none(*pmd))
-			if (alloc_pte_page(pmd))
+			if (alloc_pte(pmd))
 				return -1;
 
 		populate_pte(cpa, start, end, num_pages - cur_pages,
-- 
2.52.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ