lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <mafs0pl96w7hk.fsf@kernel.org>
Date: Tue, 25 Nov 2025 14:45:59 +0100
From: Pratyush Yadav <pratyush@...nel.org>
To: Mike Rapoport <rppt@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,  Alexander Graf
 <graf@...zon.com>,  Pasha Tatashin <pasha.tatashin@...een.com>,  Pratyush
 Yadav <pratyush@...nel.org>,  kexec@...ts.infradead.org,
  linux-mm@...ck.org,  linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/2] kho: fix restoring of contiguous ranges of order-0
 pages

On Tue, Nov 25 2025, Mike Rapoport wrote:

> From: "Mike Rapoport (Microsoft)" <rppt@...nel.org>
>
> When contiguous ranges of order-0 pages are restored, kho_restore_page()
> calls prep_compound_page() with the first page in the range and order as
> parameters and then kho_restore_pages() calls split_page() to make sure all
> pages in the range are order-0.
>
> However, since split_page() is not intended to split compound pages and
> with VM_DEBUG enabled it will trigger a VM_BUG_ON_PAGE().
>
> Update kho_restore_page() so that it will use prep_compound_page() when it
> restores a folio and make sure it properly sets page count for both large
> folios and ranges of order-0 pages.
>
> Reported-by: Pratyush Yadav <pratyush@...nel.org>
> Fixes: a667300bd53f ("kho: add support for preserving vmalloc allocations")
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@...nel.org>
> ---
>  kernel/liveupdate/kexec_handover.c | 20 ++++++++++++--------
>  1 file changed, 12 insertions(+), 8 deletions(-)
>
> diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
> index e64ee87fa62a..61d17ed1f423 100644
> --- a/kernel/liveupdate/kexec_handover.c
> +++ b/kernel/liveupdate/kexec_handover.c
> @@ -219,11 +219,11 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
>  	return 0;
>  }
>  
> -static struct page *kho_restore_page(phys_addr_t phys)
> +static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
>  {
>  	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
> +	unsigned int nr_pages, ref_cnt;
>  	union kho_page_info info;
> -	unsigned int nr_pages;
>  
>  	if (!page)
>  		return NULL;
> @@ -243,11 +243,16 @@ static struct page *kho_restore_page(phys_addr_t phys)
>  	/* Head page gets refcount of 1. */
>  	set_page_count(page, 1);
>  
> -	/* For higher order folios, tail pages get a page count of zero. */
> +	/*
> +	 * For higher order folios, tail pages get a page count of zero.
> +	 * For physically contiguous order-0 pages every page gets a page
> +	 * count of 1.
> +	 */
> +	ref_cnt = is_folio ? 0 : 1;
>  	for (unsigned int i = 1; i < nr_pages; i++)
> -		set_page_count(page + i, 0);
> +		set_page_count(page + i, ref_cnt);
>  
> -	if (info.order > 0)
> +	if (is_folio && info.order)

This is getting a bit difficult to parse. Let's separate the folio and
page initialization into separate helpers:

	/* Initialize order-0 KHO pages */
	static void kho_init_page(struct page *page, unsigned int nr_pages)
	{
		for (unsigned int i = 0; i < nr_pages; i++)
			set_page_count(page + i, 1);
	}
	
	static void kho_init_folio(struct page *page, unsigned int order)
	{
		unsigned int nr_pages = (1 << order);
	
		/* Head page gets refcount of 1. */
		set_page_count(page, 1);
	
		/* For higher order folios, tail pages get a page count of zero. */
		for (unsigned int i = 1; i < nr_pages; i++)
			set_page_count(page + i, 0);
	
		if (order > 0)
			prep_compound_page(page, order);
	}


>  		prep_compound_page(page, info.order);
>  
>  	adjust_managed_page_count(page, nr_pages);
> @@ -262,7 +267,7 @@ static struct page *kho_restore_page(phys_addr_t phys)
>   */
>  struct folio *kho_restore_folio(phys_addr_t phys)
>  {
> -	struct page *page = kho_restore_page(phys);
> +	struct page *page = kho_restore_page(phys, true);
>  
>  	return page ? page_folio(page) : NULL;
>  }
> @@ -287,11 +292,10 @@ struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
>  	while (pfn < end_pfn) {
>  		const unsigned int order =
>  			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
> -		struct page *page = kho_restore_page(PFN_PHYS(pfn));
> +		struct page *page = kho_restore_page(PFN_PHYS(pfn), false);
>  
>  		if (!page)
>  			return NULL;
> -		split_page(page, order);
>  		pfn += 1 << order;
>  	}

-- 
Regards,
Pratyush Yadav

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ