Message-ID: <20250716091332.737-1-khaliidcaliy@gmail.com>
Date: Wed, 16 Jul 2025 09:11:34 +0000
From: Khalid Ali <khaliidcaliy@...il.com>
To: tglx@...utronix.de,
	mingo@...hat.com,
	bp@...en8.de,
	dave.hansen@...ux.intel.com
Cc: x86@...nel.org,
	hpa@...or.com,
	linux-kernel@...r.kernel.org,
	Khalid Ali <khaliidcaly@...il.com>,
	Kai Huang <kai.huang@...el.com>
Subject: [PATCH v8] x86/boot: Don't return encryption mask from __startup_64()

From: Khalid Ali <khaliidcaly@...il.com>

Avoid returning the encryption mask to callers of __startup_64().

The encryption mask is already available to callers: it can be accessed
via sme_get_me_mask() in C code and via the sme_me_mask symbol in
assembly code.

This change aligns the primary boot path with
secondary_startup_64_no_verify(), which already retrieves the mask
directly from sme_me_mask.

No functional change intended; this is purely a consistency cleanup.

On Intel processors, sme_me_mask is zero, so it is safe to add the mask
only when CONFIG_AMD_MEM_ENCRYPT is enabled.

Signed-off-by: Khalid Ali <khaliidcaly@...il.com>
Acked-by: Kai Huang <kai.huang@...el.com>
---

Changes in v8:
  * Improve commit message for clarity.

Changes in v7:
  * Improve commit message for clarity.
  * Add Kai Huang's Acked-by tag.
  * Fix parameter alignment.
  * Fix patch style issues.
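
For illustration (not part of the patch): a minimal C sketch of the
retrieval pattern this change relies on. The helper name
form_early_cr3() is hypothetical; early_top_pgt, sme_get_me_mask() and
IS_ENABLED() are the kernel symbols the patch actually uses.

	/*
	 * Hypothetical sketch: mirrors what the head_64.S hunk below
	 * does in assembly. Start from early_top_pgt and fold in the
	 * SME encryption mask, which is zero unless SME is active, so
	 * the addition is a no-op on Intel CPUs.
	 */
	static unsigned long form_early_cr3(void)
	{
		unsigned long cr3 = (unsigned long)early_top_pgt;

		if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
			cr3 += sme_get_me_mask();

		return cr3;
	}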

 arch/x86/boot/startup/map_kernel.c | 17 ++++++-----------
 arch/x86/include/asm/setup.h       |  2 +-
 arch/x86/kernel/head_64.S          | 14 ++++++++------
 3 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
index 332dbe6688c4..d25e849af563 100644
--- a/arch/x86/boot/startup/map_kernel.c
+++ b/arch/x86/boot/startup/map_kernel.c
@@ -30,9 +30,9 @@ static inline bool check_la57_support(void)
 	return true;
 }
 
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
-						    pmdval_t *pmd,
-						    unsigned long p2v_offset)
+static void __head sme_postprocess_startup(struct boot_params *bp,
+					   pmdval_t *pmd,
+					   unsigned long p2v_offset)
 {
 	unsigned long paddr, paddr_end;
 	int i;
@@ -68,11 +68,6 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
 		}
 	}
 
-	/*
-	 * Return the SME encryption mask (if SME is active) to be used as a
-	 * modifier for the initial pgdir entry programmed into CR3.
-	 */
-	return sme_get_me_mask();
 }
 
 /*
@@ -84,8 +79,8 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
  * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
  * subtracting p2v_offset from the RIP-relative address.
  */
-unsigned long __head __startup_64(unsigned long p2v_offset,
-				  struct boot_params *bp)
+void __head __startup_64(unsigned long p2v_offset,
+			 struct boot_params *bp)
 {
 	pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
 	unsigned long physaddr = (unsigned long)rip_rel_ptr(_text);
@@ -213,5 +208,5 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 	for (; i < PTRS_PER_PMD; i++)
 		pmd[i] &= ~_PAGE_PRESENT;
 
-	return sme_postprocess_startup(bp, pmd, p2v_offset);
+	sme_postprocess_startup(bp, pmd, p2v_offset);
 }
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 692af46603a1..c09a4bf18706 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -50,7 +50,7 @@ extern unsigned long acpi_realmode_flags;
 
 extern void reserve_standard_io_resources(void);
 extern void i386_reserve_resources(void);
-extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
+extern void __startup_64(unsigned long p2v_offset, struct boot_params *bp);
 extern void startup_64_setup_gdt_idt(void);
 extern void startup_64_load_idt(void *vc_handler);
 extern void early_setup_idt(void);
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3e9b3a3bd039..0cba493cab26 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -106,18 +106,20 @@ SYM_CODE_START_NOALIGN(startup_64)
 
 	/*
 	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
-	 * the kernel and retrieve the modifier (SME encryption mask if SME
-	 * is active) to be added to the initial pgdir entry that will be
-	 * programmed into CR3.
-	 */
+	 * the kernel.
+	 */
 	movq	%r15, %rsi
 	call	__startup_64
 
 	/* Form the CR3 value being sure to include the CR3 modifier */
-	leaq	early_top_pgt(%rip), %rcx
-	addq	%rcx, %rax
+	leaq	early_top_pgt(%rip), %rax
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
+	/*
+	 * Retrieve the modifier (SME encryption mask if SME is active)
+	 * to be added to the initial pgdir entry programmed into CR3.
+	 */
+	addq	sme_me_mask(%rip), %rax
 	mov	%rax, %rdi
 
 	/*
-- 
2.49.0

