Message-ID: <20250917125725.665-2-pratyush@kernel.org>
Date: Wed, 17 Sep 2025 14:56:54 +0200
From: Pratyush Yadav <pratyush@...nel.org>
To: Alexander Graf <graf@...zon.com>,
Mike Rapoport <rppt@...nel.org>,
Changyuan Lyu <changyuanl@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Baoquan He <bhe@...hat.com>,
Pratyush Yadav <pratyush@...nel.org>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Jason Gunthorpe <jgg@...dia.com>,
Chris Li <chrisl@...nel.org>,
Jason Miu <jasonmiu@...gle.com>
Cc: linux-kernel@...r.kernel.org,
kexec@...ts.infradead.org,
linux-mm@...ck.org
Subject: [PATCH v2 2/2] kho: make sure page being restored is actually from KHO

When restoring a page, no sanity checks are done to make sure the page
actually came from a kexec handover. The caller is trusted to pass in
the right address. If the caller has a bug and passes in a wrong
address, an in-use page might be "restored" and returned, causing all
sorts of memory corruption.

Harden the page restore logic by stashing a magic number in
page->private along with the order. If the magic number does not match,
the page won't be touched. page->private is an unsigned long; the union
kho_page_info splits it into two parts, one holding the order and the
other holding the magic number.
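
For illustration, here is a minimal user-space sketch of the packing
(not part of the patch; assumes a 64-bit little-endian machine where
unsigned long is 64 bits wide):

  #include <assert.h>
  #include <stdio.h>

  /* Mirrors the union added by this patch; illustrative only. */
  union kho_page_info {
          unsigned long page_private;
          struct {
                  unsigned int order;
                  unsigned int magic;
          };
  };

  int main(void)
  {
          union kho_page_info info = { .order = 3, .magic = 0x4b484f50U };

          /* Same guarantee the kernel checks with static_assert() below. */
          assert(sizeof(info) == sizeof(unsigned long));
          /* Little-endian: order in the low 32 bits, magic in the high. */
          printf("%#lx\n", info.page_private); /* 0x4b484f5000000003 */
          return 0;
  }

The exact field layout within the word does not matter, since both the
writer (deserialize_bitmap()) and the reader (kho_restore_page()) go
through the same union.
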
Signed-off-by: Pratyush Yadav <pratyush@...nel.org>
---

Notes:
Changes in v2:
- Add a WARN_ON_ONCE() if order or magic is invalid.
- Add a comment explaining why the magic check also implicitly makes
sure phys is order-aligned.
- Clear page private to make sure later restores of the same page error
out.
- Move the checks to kho_restore_page() since patch 1 now moves sanity
checking to it.

 kernel/kexec_handover.c | 41 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 7 deletions(-)

diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index 69cab82abaaef..911fda8532b2e 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -32,6 +32,22 @@
#define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
#define PROP_SUB_FDT "fdt"

+#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
+
+/*
+ * KHO uses page->private, which is an unsigned long, to store page metadata.
+ * Use it to store both the magic and the order.
+ */
+union kho_page_info {
+ unsigned long page_private;
+ struct {
+ unsigned int order;
+ unsigned int magic;
+ };
+};
+
+static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
+
static bool kho_enable __ro_after_init;

bool kho_is_enabled(void)
@@ -186,16 +202,24 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
static struct page *kho_restore_page(phys_addr_t phys)
{
struct page *page = pfn_to_online_page(PHYS_PFN(phys));
- unsigned int nr_pages, order;
+ union kho_page_info info;
+ unsigned int nr_pages;

if (!page)
return NULL;

- order = page->private;
- if (order > MAX_PAGE_ORDER)
+ info.page_private = page->private;
+ /*
+ * deserialize_bitmap() only sets the magic on the head page. This magic
+ * check also implicitly makes sure phys is order-aligned since for
+ * non-order-aligned phys addresses, magic will never be set.
+ */
+ if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
return NULL;

- nr_pages = (1 << order);
+ nr_pages = (1 << info.order);
+ /* Clear private to make sure later restores on this page error out. */
+ page->private = 0;

/* Head page gets refcount of 1. */
set_page_count(page, 1);
@@ -203,8 +227,8 @@ static struct page *kho_restore_page(phys_addr_t phys)
for (unsigned int i = 1; i < nr_pages; i++)
set_page_count(page + i, 0);

- if (order > 0)
- prep_compound_page(page, order);
+ if (info.order > 0)
+ prep_compound_page(page, info.order);

adjust_managed_page_count(page, nr_pages);
return page;
@@ -341,10 +365,13 @@ static void __init deserialize_bitmap(unsigned int order,
phys_addr_t phys =
elm->phys_start + (bit << (order + PAGE_SHIFT));
struct page *page = phys_to_page(phys);
+ union kho_page_info info;

memblock_reserve(phys, sz);
memblock_reserved_mark_noinit(phys, sz);

- page->private = order;
+ info.magic = KHO_PAGE_MAGIC;
+ info.order = order;
+ page->private = info.page_private;
}
}
--
2.47.3