Message-Id: <20230412184502.145289-7-brgerst@gmail.com>
Date:   Wed, 12 Apr 2023 14:45:02 -0400
From:   Brian Gerst <brgerst@...il.com>
To:     linux-kernel@...r.kernel.org, x86@...nel.org
Cc:     Thomas Gleixner <tglx@...utronix.de>,
        Borislav Petkov <bp@...en8.de>,
        "H . Peter Anvin" <hpa@...or.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...nel.org>, Brian Gerst <brgerst@...il.com>
Subject: [PATCH v2 6/6] x86/boot: Use copied boot data in sme_enable()

Use the copied boot data (boot_params and boot_command_line) in
sme_enable() instead of the original real mode data.
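
For reference, a minimal sketch of the RIP-relative addressing idiom
sme_enable() now relies on to reach the copied boot data before the
phys_base fixups have been applied (standalone illustration with
hypothetical names, not part of the patch):

  /* Hypothetical standalone sketch, not kernel code: force a
   * RIP-relative address computation for a global, independent of
   * absolute relocations that have not been applied yet. */
  char boot_command_line[256];

  const char *cmdline_addr_rip_relative(void)
  {
          const char *p;

          asm("lea boot_command_line(%%rip), %0"
              : "=r" (p)
              : "p" (boot_command_line));  /* keep the symbol referenced */
          return p;
  }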

Signed-off-by: Brian Gerst <brgerst@...il.com>
---
 arch/x86/include/asm/mem_encrypt.h |  4 +--
 arch/x86/kernel/head_64.S          |  8 +-----
 arch/x86/mm/mem_encrypt_identity.c | 42 ++++++++++++++++--------------
 3 files changed, 26 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index b7126701574c..74f094eb88a6 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -40,7 +40,7 @@ void __init sme_early_init(void);
 void __init sev_setup_arch(void);
 
 void __init sme_encrypt_kernel(struct boot_params *bp);
-void __init sme_enable(struct boot_params *bp);
+void __init sme_enable(void);
 
 int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
 int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
@@ -70,7 +70,7 @@ static inline void __init sme_early_init(void) { }
 static inline void __init sev_setup_arch(void) { }
 
 static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
-static inline void __init sme_enable(struct boot_params *bp) { }
+static inline void __init sme_enable(void) { }
 
 static inline void sev_es_init_vc_handling(void) { }
 
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index c7b2ef379f42..c6ea37712921 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -72,14 +72,11 @@ SYM_CODE_START_NOALIGN(startup_64)
 	wrmsr
 
 	leaq	_text(%rip), %rdi
-	pushq	%rsi
+	/* RSI contains address of real_mode_data */
 	call	copy_bootdata
-	popq	%rsi
 
 	leaq	_text(%rip), %rdi
-	pushq	%rsi
 	call	startup_64_setup_env
-	popq	%rsi
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	/*
@@ -88,10 +85,7 @@ SYM_CODE_START_NOALIGN(startup_64)
 	 * which needs to be done before any CPUID instructions are executed in
 	 * subsequent code.
 	 */
-	movq	%rsi, %rdi
-	pushq	%rsi
 	call	sme_enable
-	popq	%rsi
 #endif
 
 	/* Now switch to __KERNEL_CS so IRET works reliably */
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index c6efcf559d88..e75e4f5a8a71 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -502,8 +502,9 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
 	native_write_cr3(__native_read_cr3());
 }
 
-void __init sme_enable(struct boot_params *bp)
+void __init sme_enable(void)
 {
+	struct boot_params *bp;
 	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
 	unsigned int eax, ebx, ecx, edx;
 	unsigned long feature_mask;
@@ -513,6 +514,27 @@ void __init sme_enable(struct boot_params *bp)
 	bool snp;
 	u64 msr;
 
+	/*
+	 * Fixups have not been applied to phys_base yet and we're running
+	 * identity mapped, so we must obtain the address of the global data
+	 * using rip-relative addressing.
+	 */
+	asm("lea sme_cmdline_arg(%%rip), %0"
+	    : "=r" (cmdline_arg)
+	    : "p" (sme_cmdline_arg));
+	asm("lea sme_cmdline_on(%%rip), %0"
+	    : "=r" (cmdline_on)
+	    : "p" (sme_cmdline_on));
+	asm("lea sme_cmdline_off(%%rip), %0"
+	    : "=r" (cmdline_off)
+	    : "p" (sme_cmdline_off));
+	asm("lea boot_params(%%rip), %0"
+	    : "=r" (bp)
+	    : "m" (boot_params));
+	asm("lea boot_command_line(%%rip), %0"
+	    : "=r" (cmdline_ptr)
+	    : "p" (boot_command_line));
+
 	snp = snp_init(bp);
 
 	/* Check for the SME/SEV support leaf */
@@ -577,29 +599,11 @@ void __init sme_enable(struct boot_params *bp)
 		goto out;
 	}
 
-	/*
-	 * Fixups have not been applied to phys_base yet and we're running
-	 * identity mapped, so we must obtain the address to the SME command
-	 * line argument data using rip-relative addressing.
-	 */
-	asm ("lea sme_cmdline_arg(%%rip), %0"
-	     : "=r" (cmdline_arg)
-	     : "p" (sme_cmdline_arg));
-	asm ("lea sme_cmdline_on(%%rip), %0"
-	     : "=r" (cmdline_on)
-	     : "p" (sme_cmdline_on));
-	asm ("lea sme_cmdline_off(%%rip), %0"
-	     : "=r" (cmdline_off)
-	     : "p" (sme_cmdline_off));
-
 	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
 		active_by_default = true;
 	else
 		active_by_default = false;
 
-	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
-				     ((u64)bp->ext_cmd_line_ptr << 32));
-
 	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
 		return;
 
-- 
2.39.2
