Message-Id: <d9e65f4707b28b107c352bd4bc311db7a8ea738b.1769434279.git.houwenlong.hwl@antgroup.com>
Date: Mon, 26 Jan 2026 21:33:54 +0800
From: Hou Wenlong <houwenlong.hwl@...group.com>
To: linux-kernel@...r.kernel.org
Cc: Lai Jiangshan <jiangshan.ljs@...group.com>,
	Hou Wenlong <houwenlong.hwl@...group.com>,
	Thomas Gleixner <tglx@...nel.org>,
	Ingo Molnar <mingo@...hat.com>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	x86@...nel.org,
	"H. Peter Anvin" <hpa@...or.com>,
	Juergen Gross <jgross@...e.com>,
	Boris Ostrovsky <boris.ostrovsky@...cle.com>,
	Ard Biesheuvel <ardb@...nel.org>,
	Josh Poimboeuf <jpoimboe@...nel.org>,
	Nathan Chancellor <nathan@...nel.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Alexander Graf <graf@...zon.com>,
	Joel Granados <joel.granados@...nel.org>,
	Thomas Huth <thuth@...hat.com>,
	Uros Bizjak <ubizjak@...il.com>,
	Brian Gerst <brgerst@...il.com>,
	Kiryl Shutsemau <kas@...nel.org>,
	"Xin Li (Intel)" <xin@...or.com>,
	Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>,
	xen-devel@...ts.xenproject.org
Subject: [RFC PATCH 4/5] x86/boot: Perform virtual address relocation in kernel entry

Perform virtual address relocation for the uncompressed kernel during
boot, similar to the relocation performed during decompression: pick a
random virtual address, compute the delta from LOAD_PHYSICAL_ADDR, and
patch every 32-bit and 64-bit relocation entry recorded at
__relocation_end. The pass is skipped when the decompressor has already
relocated the kernel (KASLR_FLAG is set in the boot loadflags) or when
"nokaslr" is given on the kernel command line.
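
For illustration, assuming the common x86_64 defaults
(CONFIG_PHYSICAL_ALIGN = 2 MiB, LOAD_PHYSICAL_ADDR = 16 MiB,
KERNEL_IMAGE_SIZE = 1 GiB) and a 64 MiB image, find_random_virt_addr()
chooses one of

    1 + (1024 MiB - 16 MiB - 64 MiB) / 2 MiB = 473

possible 2 MiB-aligned virtual slots above LOAD_PHYSICAL_ADDR.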

Signed-off-by: Hou Wenlong <houwenlong.hwl@...group.com>
---
 arch/x86/boot/startup/Makefile |   1 +
 arch/x86/boot/startup/kaslr.c  | 118 +++++++++++++++++++++++++++++++++
 arch/x86/include/asm/setup.h   |   1 +
 arch/x86/kernel/head_64.S      |   7 ++
 arch/x86/lib/cmdline.c         |   6 ++
 arch/x86/lib/kaslr.c           |   5 ++
 arch/x86/platform/pvh/head.S   |  15 ++++-
 7 files changed, 150 insertions(+), 3 deletions(-)
 create mode 100644 arch/x86/boot/startup/kaslr.c

diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile
index 5e499cfb29b5..eeaefa4e25fb 100644
--- a/arch/x86/boot/startup/Makefile
+++ b/arch/x86/boot/startup/Makefile
@@ -20,6 +20,7 @@ KCOV_INSTRUMENT	:= n
 
 obj-$(CONFIG_X86_64)		+= gdt_idt.o map_kernel.o
 obj-$(CONFIG_AMD_MEM_ENCRYPT)	+= sme.o sev-startup.o
+obj-$(CONFIG_RELOCATABLE_UNCOMPRESSED_KERNEL) += kaslr.o
 pi-objs				:= $(patsubst %.o,$(obj)/%.o,$(obj-y))
 
 lib-$(CONFIG_X86_64)		+= la57toggle.o
diff --git a/arch/x86/boot/startup/kaslr.c b/arch/x86/boot/startup/kaslr.c
new file mode 100644
index 000000000000..fb07c31e21b3
--- /dev/null
+++ b/arch/x86/boot/startup/kaslr.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+/* A hack to suppress the non-static declaration of kaslr_get_random_long(). */
+#define _ASM_KASLR_H_
+#include <asm/sections.h>
+#include <asm/bootparam.h>
+#include <asm/cpuid/api.h>
+
+extern char __relocation_end[];
+
+static struct boot_params *boot_params_ptr __initdata;
+
+static inline void debug_putstr(const char *str)
+{
+}
+
+static inline bool has_cpuflag(int flag)
+{
+	u32 reg = 0;
+	u32 level = native_cpuid_eax(0x0);
+
+	if (level >= 0x00000001) {
+		if (flag == X86_FEATURE_RDRAND)
+			reg = native_cpuid_ecx(0x1);
+		else if (flag == X86_FEATURE_TSC)
+			reg = native_cpuid_edx(0x1);
+	}
+
+	return !!(reg & (1U << (flag & 31)));
+}
+
+static unsigned long __init rotate_xor(unsigned long hash, const void *area,
+				       size_t size)
+{
+	size_t i;
+	unsigned long *ptr = (unsigned long *)area;
+
+	for (i = 0; i < size / sizeof(hash); i++) {
+		/* Rotate by odd number of bits and XOR. */
+		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+		hash ^= ptr[i];
+	}
+
+	return hash;
+}
+
+/* Attempt to create simple but unpredictable starting entropy. */
+static unsigned long get_boot_seed(void)
+{
+	unsigned long hash = 0;
+
+	hash = rotate_xor(hash, boot_params_ptr, sizeof(*boot_params_ptr));
+
+	return hash;
+}
+
+#define KASLR_COMPRESSED_BOOT
+#define KASLR_FUNC_PREFIX static __init
+#include "../../lib/kaslr.c"
+
+/* A hack to suppress the non-static declaration of cmdline_find_option_bool(). */
+#define _ASM_X86_CMDLINE_H
+#undef CONFIG_CMDLINE_BOOL
+#define builtin_cmdline NULL
+#define CMDLINE_FUNC_PREFIX static __maybe_unused __init
+#include "../../lib/cmdline.c"
+
+static unsigned long __init find_random_virt_addr(unsigned long minimum,
+						  unsigned long image_size)
+{
+	unsigned long slots, random_addr;
+
+	/*
+	 * How many CONFIG_PHYSICAL_ALIGN-sized slots can hold
+	 * image_size within the range from minimum to
+	 * KERNEL_IMAGE_SIZE?
+	 */
+	slots = 1 + (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN;
+
+	random_addr = kaslr_get_random_long("Virtual") % slots;
+
+	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
+}
+
+void __init __relocate_kernel(unsigned long p2v_offset, struct boot_params *bp)
+{
+	int *reloc = (int *)rip_rel_ptr(__relocation_end);
+	unsigned long image_size = rip_rel_ptr(_end) - rip_rel_ptr(_text);
+	unsigned long ptr, virt_addr, delta;
+	unsigned long cmd_line_ptr;
+
+	/* If relocation has occurred during decompression, simply skip it. */
+	if (bp->hdr.loadflags & KASLR_FLAG)
+		return;
+
+	cmd_line_ptr = bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32);
+	if (cmdline_find_option_bool((char *)cmd_line_ptr, "nokaslr"))
+		return;
+
+	boot_params_ptr = bp;
+	virt_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, image_size);
+	delta = virt_addr - LOAD_PHYSICAL_ADDR;
+
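+	/* Walk the 32-bit relocation entries backwards from __relocation_end. */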
+	for (reloc--; *reloc; reloc--) {
+		ptr = (unsigned long)(*reloc + p2v_offset);
+		*(uint32_t *)ptr += delta;
+	}
+
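+	/* A zero entry separates the 32-bit entries from the 64-bit entries. */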
+	for (reloc--; *reloc; reloc--) {
+		ptr = (unsigned long)(*reloc + p2v_offset);
+		*(uint64_t *)ptr += delta;
+	}
+}
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 914eb32581c7..86a715a255a5 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -56,6 +56,7 @@ extern void startup_64_load_idt(void *vc_handler);
 extern void __pi_startup_64_load_idt(void *vc_handler);
 extern void early_setup_idt(void);
 extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
+extern void __init __relocate_kernel(unsigned long p2v_offset, struct boot_params *bp);
 
 #ifdef CONFIG_X86_INTEL_MID
 extern void x86_intel_mid_early_setup(void);
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 21816b48537c..868d8fdd59df 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -97,6 +97,13 @@ SYM_CODE_START_NOALIGN(startup_64)
 	/* Sanitize CPU configuration */
 	call verify_cpu
 
+#ifdef CONFIG_RELOCATABLE_UNCOMPRESSED_KERNEL
+	leaq	common_startup_64(%rip), %rdi
+	subq	.Lcommon_startup_64(%rip), %rdi
+	movq	%r15, %rsi
+	call	__pi___relocate_kernel
+#endif
+
 	/*
 	 * Derive the kernel's physical-to-virtual offset from the physical and
 	 * virtual addresses of common_startup_64().
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index c65cd5550454..07c4398b9e67 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -11,6 +11,10 @@
 #include <asm/cmdline.h>
 #include <asm/bug.h>
 
+#ifndef CMDLINE_FUNC_PREFIX
+#define CMDLINE_FUNC_PREFIX
+#endif
+
 static inline int myisspace(u8 c)
 {
 	return c <= ' ';	/* Close enough approximation */
@@ -205,6 +209,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
 	return len;
 }
 
+CMDLINE_FUNC_PREFIX
 int cmdline_find_option_bool(const char *cmdline, const char *option)
 {
 	int ret;
@@ -219,6 +224,7 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
 	return ret;
 }
 
+CMDLINE_FUNC_PREFIX
 int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
 			int bufsize)
 {
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 8c7cd115b484..711a19729e20 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -13,6 +13,10 @@
 #include <asm/e820/api.h>
 #include <asm/shared/io.h>
 
+#ifndef KASLR_FUNC_PREFIX
+#define KASLR_FUNC_PREFIX
+#endif
+
 /*
  * When built for the regular kernel, several functions need to be stubbed out
  * or changed to their regular kernel equivalent.
@@ -46,6 +50,7 @@ static inline u16 i8254(void)
 	return timer;
 }
 
+KASLR_FUNC_PREFIX
 unsigned long kaslr_get_random_long(const char *purpose)
 {
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 344030c1a81d..94832930b0a2 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -103,6 +103,17 @@ SYM_CODE_START(pvh_start_xen)
 	btsl $_EFER_LME, %eax
 	wrmsr
 
+	/*
+	 * Fill in the identity mapping entries at runtime instead of
+	 * preconstructing them: preconstructed entries would generate
+	 * relocation entries, and the relocation pass in __relocate_kernel()
+	 * would then modify them and break the mapping.
+	 */
+	leal rva(pvh_init_top_pgt)(%ebp), %edi
+	addl $(pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC), (%edi)
+	leal rva(pvh_level3_ident_pgt)(%ebp), %edi
+	addl $(pvh_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC), (%edi)
+
 	/*
 	 * Reuse the non-relocatable symbol emitted for the ELF note to
 	 * subtract the build time physical address of pvh_start_xen() from
@@ -254,7 +265,6 @@ SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
  * startup_64 transitions to init_top_pgt.
  */
 SYM_DATA_START_PAGE_ALIGNED(pvh_init_top_pgt)
-	.quad   pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org    pvh_init_top_pgt + L4_PAGE_OFFSET * 8, 0
 	.quad   pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org    pvh_init_top_pgt + L4_START_KERNEL * 8, 0
@@ -263,8 +273,7 @@ SYM_DATA_START_PAGE_ALIGNED(pvh_init_top_pgt)
 SYM_DATA_END(pvh_init_top_pgt)
 
 SYM_DATA_START_PAGE_ALIGNED(pvh_level3_ident_pgt)
-	.quad	pvh_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
-	.fill	511, 8, 0
+	.fill	512, 8, 0
 SYM_DATA_END(pvh_level3_ident_pgt)
 SYM_DATA_START_PAGE_ALIGNED(pvh_level2_ident_pgt)
 	/*
-- 
2.31.1

