Message-ID: <20260128224049.385013-3-vishal.moola@gmail.com>
Date: Wed, 28 Jan 2026 14:40:48 -0800
From: "Vishal Moola (Oracle)" <vishal.moola@...il.com>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
x86@...nel.org,
"Mike Rapoport (Microsoft)" <rppt@...nel.org>
Cc: akpm@...ux-foundation.org,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
"Vishal Moola (Oracle)" <vishal.moola@...il.com>
Subject: [PATCH v2 2/3] x86/mm/pat: Convert pmd code to use ptdescs

In order to allocate ptdescs separately from pages, we need all allocation
and free sites to use the appropriate functions. Convert these pmd
allocation/free sites to use ptdescs.

populate_pgd() also allocates pagetables that may later be freed by
try_to_free_pmd_page(), so allocate ptdescs there as well.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
---
arch/x86/mm/pat/set_memory.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
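
For reference (not part of the patch itself): the pagetable_alloc(),
ptdesc_address(), pagetable_free() and virt_to_ptdesc() calls used in the
diff below come from <linux/mm.h>. A minimal sketch of the allocation/free
pairing this conversion moves to; the helper names alloc_table() and
free_table() are made up for illustration only:

#include <linux/mm.h>

/*
 * Illustrative sketch only, not part of this patch. The helper names are
 * hypothetical; the API calls are the ones the diff below uses.
 */
static void *alloc_table(gfp_t gfp)
{
        /* Allocate one zeroed page-table page described by a ptdesc. */
        struct ptdesc *ptdesc = pagetable_alloc(gfp | __GFP_ZERO, 0);

        if (!ptdesc)
                return NULL;

        /* Hand back the kernel virtual address of the table itself. */
        return ptdesc_address(ptdesc);
}

static void free_table(void *table)
{
        /* Map the table's virtual address back to its ptdesc and free it. */
        pagetable_free(virt_to_ptdesc(table));
}
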
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 2dcb565d8f9b..ee3d0067aeea 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1420,7 +1420,7 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
if (!pmd_none(pmd[i]))
return false;
- free_page((unsigned long)pmd);
+ pagetable_free(virt_to_ptdesc((void *)pmd));
return true;
}
@@ -1548,9 +1548,10 @@ static int alloc_pte_ptdesc(pmd_t *pmd)
return 0;
}
-static int alloc_pmd_page(pud_t *pud)
+static int alloc_pmd_ptdesc(pud_t *pud)
{
- pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+ pmd_t *pmd = (pmd_t *)ptdesc_address(
+ pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0));
if (!pmd)
return -1;
@@ -1623,7 +1624,7 @@ static long populate_pmd(struct cpa_data *cpa,
* We cannot use a 1G page so allocate a PMD page if needed.
*/
if (pud_none(*pud))
- if (alloc_pmd_page(pud))
+ if (alloc_pmd_ptdesc(pud))
return -1;
pmd = pmd_offset(pud, start);
@@ -1679,7 +1680,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
* Need a PMD page?
*/
if (pud_none(*pud))
- if (alloc_pmd_page(pud))
+ if (alloc_pmd_ptdesc(pud))
return -1;
cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
@@ -1716,7 +1717,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
pud = pud_offset(p4d, start);
if (pud_none(*pud))
- if (alloc_pmd_page(pud))
+ if (alloc_pmd_ptdesc(pud))
return -1;
tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
@@ -1744,7 +1745,8 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
pgd_entry = cpa->pgd + pgd_index(addr);
if (pgd_none(*pgd_entry)) {
- p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
+ p4d = (p4d_t *)ptdesc_address(
+ pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0));
if (!p4d)
return -1;
@@ -1756,7 +1758,8 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
*/
p4d = p4d_offset(pgd_entry, addr);
if (p4d_none(*p4d)) {
- pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
+ pud = (pud_t *)ptdesc_address(
+ pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0));
if (!pud)
return -1;
--
2.52.0