Message-ID: <aPXrLy8BmblbLpCG@kernel.org>
Date: Mon, 20 Oct 2025 10:56:31 +0300
From: Mike Rapoport <rppt@...nel.org>
To: Pasha Tatashin <pasha.tatashin@...een.com>
Cc: akpm@...ux-foundation.org, brauner@...nel.org, corbet@....net,
	graf@...zon.com, jgg@...pe.ca, linux-kernel@...r.kernel.org,
	linux-kselftest@...r.kernel.org, linux-mm@...ck.org,
	masahiroy@...nel.org, ojeda@...nel.org, pratyush@...nel.org,
	rdunlap@...radead.org, tj@...nel.org, jasonmiu@...gle.com,
	dmatlack@...gle.com, skhawaja@...gle.com
Subject: Re: [PATCH v6 08/10] liveupdate: kho: warn and fail on metadata or
 preserved memory in scratch area

On Sat, Oct 18, 2025 at 01:17:54PM -0400, Pasha Tatashin wrote:
> It is invalid for KHO metadata or preserved memory regions to be located
> within the KHO scratch area, as this area is overwritten when the next
> kernel is loaded, and used early in boot by the next kernel. This can
> lead to memory corruption.
> 
> Add checks to kho_preserve_* and KHO's internal metadata allocators
> (xa_load_or_alloc, new_chunk) to verify that the physical address of the
> memory does not overlap with any defined scratch region. If an overlap
> is detected, the operation will fail and a WARN_ON is triggered. To
> avoid performance overhead in production kernels, these checks are
> enabled only when CONFIG_KEXEC_HANDOVER_DEBUG is selected.
> 
> Signed-off-by: Pasha Tatashin <pasha.tatashin@...een.com>
> ---
>  kernel/liveupdate/Kconfig                   |  8 ++++
>  kernel/liveupdate/Makefile                  |  1 +
>  kernel/liveupdate/kexec_handover.c          | 52 ++++++++++++++-------
>  kernel/liveupdate/kexec_handover_debug.c    | 25 ++++++++++
>  kernel/liveupdate/kexec_handover_internal.h |  9 ++++
>  5 files changed, 78 insertions(+), 17 deletions(-)
>  create mode 100644 kernel/liveupdate/kexec_handover_debug.c
> 
> diff --git a/kernel/liveupdate/Kconfig b/kernel/liveupdate/Kconfig
> index cea287842475..851d1a22b4c5 100644
> --- a/kernel/liveupdate/Kconfig
> +++ b/kernel/liveupdate/Kconfig
> @@ -27,4 +27,12 @@ config KEXEC_HANDOVER_DEBUGFS
>  	  Also, enables inspecting the KHO fdt trees with the debugfs binary
>  	  blobs.
>  
> +config KEXEC_HANDOVER_DEBUG
> +	bool "Enable Kexec Handover debug checks"
> +	depends on KEXEC_HANDOVER_DEBUGFS
> +	help
> +	  This option enables extra sanity checks for the Kexec Handover
> +	  subsystem. Since KHO performance is crucial in live update
> +	  scenarios and the extra code might add overhead, these checks
> +	  are only optionally enabled.

An empty line here would be nice.

>  endmenu
> diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
> index c87d00c40c82..ebfc31814d16 100644
> --- a/kernel/liveupdate/kexec_handover.c
> +++ b/kernel/liveupdate/kexec_handover.c
> @@ -8,6 +8,7 @@
>  
>  #define pr_fmt(fmt) "KHO: " fmt
>  
> +#include <linux/cleanup.h>
>  #include <linux/cma.h>
>  #include <linux/count_zeros.h>
>  #include <linux/kexec.h>
> @@ -131,26 +132,26 @@ static struct kho_out kho_out = {
>  
>  static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
>  {
> -	void *elm, *res;
> +	void *res = xa_load(xa, index);
>  
> -	elm = xa_load(xa, index);
> -	if (elm)
> -		return elm;
> +	if (res)
> +		return res;
> +
> +	void *elm __free(kfree) = kzalloc(sz, GFP_KERNEL);
>  
> -	elm = kzalloc(sz, GFP_KERNEL);
>  	if (!elm)
>  		return ERR_PTR(-ENOMEM);
>  
> +	if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), sz)))

I'd move the WARN_ON into kho_scratch_overlap().
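
Something along these lines (untested sketch; __kho_scratch_overlap() is
just the loop from your patch, renamed):

	static bool __kho_scratch_overlap(phys_addr_t phys, size_t size)
	{
		phys_addr_t scratch_start, scratch_end;
		unsigned int i;

		for (i = 0; i < kho_scratch_cnt; i++) {
			scratch_start = kho_scratch[i].addr;
			scratch_end = kho_scratch[i].addr + kho_scratch[i].size - 1;

			if (phys <= scratch_end && phys + size > scratch_start)
				return true;
		}

		return false;
	}

	/* warn in one place instead of at every call site */
	bool kho_scratch_overlap(phys_addr_t phys, size_t size)
	{
		return WARN_ON(__kho_scratch_overlap(phys, size));
	}

and then this call site (and the others) becomes plain

	if (kho_scratch_overlap(virt_to_phys(elm), sz))
		return ERR_PTR(-EINVAL);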

> +		return ERR_PTR(-EINVAL);
> +
>  	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
>  	if (xa_is_err(res))
> -		res = ERR_PTR(xa_err(res));
> -
> -	if (res) {
> -		kfree(elm);
> +		return ERR_PTR(xa_err(res));
> +	else if (res)
>  		return res;
> -	}
>  
> -	return elm;
> +	return no_free_ptr(elm);
>  }
  
...

> @@ -379,14 +384,17 @@ static int kho_mem_serialize(struct kho_out *kho_out)
>  	struct khoser_mem_chunk *chunk = NULL;
>  	struct kho_mem_phys *physxa;
>  	unsigned long order;
> +	int ret = -ENOMEM;

Nit: s/ret/err/

>  
>  	xa_for_each(&kho_out->track.orders, order, physxa) {
>  		struct kho_mem_phys_bits *bits;
>  		unsigned long phys;
>  
> diff --git a/kernel/liveupdate/kexec_handover_debug.c b/kernel/liveupdate/kexec_handover_debug.c
> new file mode 100644
> index 000000000000..7986dcc63047
> --- /dev/null
> +++ b/kernel/liveupdate/kexec_handover_debug.c
> @@ -0,0 +1,25 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * kexec_handover_debug.c - kexec handover optional debug functionality
> + * Copyright (C) 2025 Google LLC, Pasha Tatashin <pasha.tatashin@...een.com>
> + */
> +
> +#define pr_fmt(fmt) "KHO: " fmt
> +
> +#include "kexec_handover_internal.h"
> +
> +bool kho_scratch_overlap(phys_addr_t phys, size_t size)
> +{
> +	phys_addr_t scratch_start, scratch_end;
> +	unsigned int i;
> +
> +	for (i = 0; i < kho_scratch_cnt; i++) {
> +		scratch_start = kho_scratch[i].addr;
> +		scratch_end = kho_scratch[i].addr + kho_scratch[i].size - 1;

I agree with Pratyush that 

		scratch_end = kho_scratch[i].addr + kho_scratch[i].size;

		if (phys < scratch_end ...

is clearer.
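
For completeness, the whole test would then read (illustrative only):

	for (i = 0; i < kho_scratch_cnt; i++) {
		scratch_start = kho_scratch[i].addr;
		scratch_end = kho_scratch[i].addr + kho_scratch[i].size;

		if (phys < scratch_end && phys + size > scratch_start)
			return true;
	}

With scratch_end exclusive the "- 1" goes away and both comparisons
are against half-open ranges.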

> +		if (phys <= scratch_end && (phys + size) > scratch_start)
> +			return true;
> +	}
> +
> +	return false;
> +}

-- 
Sincerely yours,
Mike.
