Message-ID: <20240125112818.2016733-29-ardb+git@google.com>
Date: Thu, 25 Jan 2024 12:28:29 +0100
From: Ard Biesheuvel <ardb+git@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: Ard Biesheuvel <ardb@...nel.org>, Kevin Loughlin <kevinloughlin@...gle.com>, 
	Tom Lendacky <thomas.lendacky@....com>, Dionna Glaze <dionnaglaze@...gle.com>, 
	Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>, 
	Dave Hansen <dave.hansen@...ux.intel.com>, Andy Lutomirski <luto@...nel.org>, 
	Arnd Bergmann <arnd@...db.de>, Nathan Chancellor <nathan@...nel.org>, 
	Nick Desaulniers <ndesaulniers@...gle.com>, Justin Stitt <justinstitt@...gle.com>, 
	Brian Gerst <brgerst@...il.com>, linux-arch@...r.kernel.org, llvm@...ts.linux.dev
Subject: [PATCH v2 10/17] x86/head64: Move early startup code into __pitext

From: Ard Biesheuvel <ardb@...nel.org>

The boot CPU runs some early startup C code using a 1:1 mapping of
memory, which deviates from the normal kernel virtual mapping that
statically initialized pointer variables are based on, so absolute
address references are not yet valid at this point.

This makes it necessary to strictly limit which C code is actually
called from that early boot path. Implement this by moving the early
startup code into __pitext.

Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
---
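Note: __pitext itself is introduced earlier in this series. As a rough
illustration of the kind of annotation involved, such markers are
typically thin wrappers around the compiler's section attribute; a
minimal sketch with assumed names (the real definition in the series
may differ):

  /* Sketch only, not the kernel's actual definition: tagging a
   * function this way groups it into a dedicated section, so the
   * linker script can place it in a region that is safe to execute
   * via the early 1:1 mapping. */
  #define __pitext __attribute__((__section__(".pi.text")))

  static unsigned long __pitext early_helper(void)
  {
          /* Code here must not rely on statically initialized
           * pointers: they hold kernel virtual addresses. */
          return 0;
  }
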
 arch/x86/kernel/head64.c  |  9 ++++----
 arch/x86/kernel/head_64.S | 24 ++++++++++++--------
 2 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 993d888a3172..079e1adc6121 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -70,7 +70,8 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] __initconst = {
 	asm("movq $" __stringify(sym) ", %0":"=r"(__v));		\
 	__v; })
 
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
+static unsigned long __pitext sme_postprocess_startup(struct boot_params *bp,
+						      pmdval_t *pmd)
 {
 	unsigned long vaddr, vaddr_end;
 	int i;
@@ -113,7 +114,7 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
 	return sme_get_me_mask();
 }
 
-unsigned long __head __startup_64(struct boot_params *bp)
+unsigned long __pitext __startup_64(struct boot_params *bp)
 {
 	unsigned long physaddr = (unsigned long)_text;
 	unsigned long load_delta, *p;
@@ -508,7 +509,7 @@ void __init __noreturn x86_64_start_reservations(char *real_mode_data)
  */
 static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
 
-static void early_load_idt(void (*handler)(void))
+static void __pitext early_load_idt(void (*handler)(void))
 {
 	gate_desc *idt = bringup_idt_table;
 	struct desc_ptr bringup_idt_descr;
@@ -539,7 +540,7 @@ void early_setup_idt(void)
 /*
  * Setup boot CPU state needed before kernel switches to virtual addresses.
  */
-void __head startup_64_setup_env(void)
+void __pitext startup_64_setup_env(void)
 {
 	struct desc_ptr startup_gdt_descr;
 
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b8704ac1a4da..5defefcc7f50 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -42,6 +42,15 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
 	__HEAD
 	.code64
 SYM_CODE_START_NOALIGN(startup_64)
+	UNWIND_HINT_END_OF_STACK
+	jmp	primary_startup_64
+SYM_CODE_END(startup_64)
+
+	__PITEXT
+#include "verify_cpu.S"
+#include "sev_verify_cbit.S"
+
+SYM_CODE_START_LOCAL(primary_startup_64)
 	UNWIND_HINT_END_OF_STACK
 	/*
 	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
@@ -131,10 +140,12 @@ SYM_CODE_START_NOALIGN(startup_64)
 	movq	%rax, %cr3
 
 	/* Branch to the common startup code at its kernel virtual address */
-	movq	$common_startup_64, %rax
 	ANNOTATE_RETPOLINE_SAFE
-	jmp	*%rax
-SYM_CODE_END(startup_64)
+	jmp	*.Lcommon_startup_64(%rip)
+SYM_CODE_END(primary_startup_64)
+
+	__INITRODATA
+SYM_DATA_LOCAL(.Lcommon_startup_64, .quad common_startup_64)
 
 	.text
 SYM_CODE_START(secondary_startup_64)
@@ -410,9 +421,6 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
 	int3
 SYM_CODE_END(secondary_startup_64)
 
-#include "verify_cpu.S"
-#include "sev_verify_cbit.S"
-
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
 /*
  * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
@@ -539,10 +547,8 @@ SYM_CODE_END(early_idt_handler_common)
  * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
  *
  * XXX it does, fix this.
- *
- * This handler will end up in the .init.text section and not be
- * available to boot secondary CPUs.
  */
+	__PITEXT
 SYM_CODE_START_NOALIGN(vc_no_ghcb)
 	UNWIND_HINT_IRET_REGS offset=8
 	ENDBR
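
A note on the jump rewrite in head_64.S above: "movq $sym, %reg"
embeds an absolute relocation in the instruction stream, which is
exactly what position independent code must avoid. The patch instead
stores the absolute address of common_startup_64 in an init-rodata
literal and branches through it with a RIP-relative indirect jump. A
minimal standalone sketch of the idiom (symbol names here are
illustrative, not taken from the patch):

  	.text
  	.globl	real_target
  real_target:				/* the eventual destination */
  	ret

  	.section .rodata
  .Ltarget_ptr:
  	.quad	real_target		/* absolute address, fixed up as a data relocation */

  	.text
  	.globl	trampoline
  trampoline:
  	jmp	*.Ltarget_ptr(%rip)	/* PC-relative fetch of the pointer, then indirect jump */

The code itself contains only a RIP-relative memory operand, so it
runs correctly from either the 1:1 mapping or the kernel virtual
mapping; only the data word carries the absolute address.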
-- 
2.43.0.429.g432eaa2c6b-goog

