Message-ID: <aYIqR4Nh3sHfIVko@kernel.org>
Date: Tue, 3 Feb 2026 19:03:03 +0200
From: Mike Rapoport <rppt@...nel.org>
To: "Vishal Moola (Oracle)" <vishal.moola@...il.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org, x86@...nel.org,
akpm@...ux-foundation.org,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...nel.org>,
Peter Zijlstra <peterz@...radead.org>
Subject: Re: [PATCH v3 1/3] x86/mm/pat: Convert pte code to use ptdescs
Hi Vishal,
On Mon, Feb 02, 2026 at 09:20:03AM -0800, Vishal Moola (Oracle) wrote:
> In order to separately allocate ptdescs from pages, we need all allocation
> and free sites to use the appropriate functions. Convert these pte
> allocation/free sites to use ptdescs.
>
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
> ---
> arch/x86/mm/pat/set_memory.c | 15 +++++++++------
> 1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index 6c6eb486f7a6..f9f9d4ca8e71 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -1408,7 +1408,7 @@ static bool try_to_free_pte_page(pte_t *pte)
> if (!pte_none(pte[i]))
> return false;
>
> - free_page((unsigned long)pte);
> + pagetable_free(virt_to_ptdesc((void *)pte));
> return true;
> }
>
> @@ -1537,12 +1537,15 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
> */
> }
>
> -static int alloc_pte_page(pmd_t *pmd)
> +static int alloc_pte_ptdesc(pmd_t *pmd)
> {
> - pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
> - if (!pte)
> + pte_t *pte;
> + struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
AFAIR, x86 folks like reverse xmas tree for variable declarations.
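I.e., something along these lines, just to illustrate the ordering (longest
declaration first):

	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
	pte_t *pte;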
> +
> + if (!ptdesc)
> return -1;
>
> + pte = (pte_t *) ptdesc_address(ptdesc);
No need to cast void * to another pointer type.
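Since ptdesc_address() returns void *, the assignment could simply be:

	pte = ptdesc_address(ptdesc);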
The same comments apply to the other two patches as well.
> set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
> return 0;
> }
> @@ -1600,7 +1603,7 @@ static long populate_pmd(struct cpa_data *cpa,
> */
> pmd = pmd_offset(pud, start);
> if (pmd_none(*pmd))
> - if (alloc_pte_page(pmd))
> + if (alloc_pte_ptdesc(pmd))
> return -1;
>
> populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
> @@ -1641,7 +1644,7 @@ static long populate_pmd(struct cpa_data *cpa,
> if (start < end) {
> pmd = pmd_offset(pud, start);
> if (pmd_none(*pmd))
> - if (alloc_pte_page(pmd))
> + if (alloc_pte_ptdesc(pmd))
> return -1;
>
> populate_pte(cpa, start, end, num_pages - cur_pages,
> --
> 2.52.0
>
--
Sincerely yours,
Mike.