Message-ID: <20250617073733.760-1-khaliidcaliy@gmail.com>
Date: Tue, 17 Jun 2025 07:36:25 +0000
From: Khalid Ali <khaliidcaliy@...il.com>
To: tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
x86@...nel.org,
ardb@...nel.org
Cc: hpa@...or.com,
linux-kernel@...r.kernel.org,
Khalid Ali <khaliidcaliy@...il.com>,
kernel test robot <lkp@...el.com>
Subject: [PATCH v3] x86/boot: Don't return encryption mask from __startup_64()

Avoid returning the SME encryption mask from __startup_64() and instead
let the function handle encryption directly as needed. The encryption
mask is already available to callers: it can be read via
sme_get_me_mask() in C code, or directly through the sme_me_mask symbol
in assembly, when CONFIG_AMD_MEM_ENCRYPT is enabled.
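
As a minimal illustration (not part of this patch), a C caller that
still needs the CR3 modifier can read it directly; sme_get_me_mask()
comes from <linux/mem_encrypt.h> and folds to 0 when
CONFIG_AMD_MEM_ENCRYPT is disabled:

	#include <linux/mem_encrypt.h>

	/* 0 when SME is inactive or CONFIG_AMD_MEM_ENCRYPT=n */
	u64 modifier = sme_get_me_mask();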

This change aligns with how secondary_startup_64_no_verify handles SME
and keeps the behavior consistent. For Intel CPUs, SME is not relevant,
so there is no need to retrieve the mask unless CONFIG_AMD_MEM_ENCRYPT
is enabled.
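
For reference, the pattern already used on the secondary path looks
roughly like this (paraphrased from secondary_startup_64_no_verify in
head_64.S; the exact upstream code may differ):

	#ifdef CONFIG_AMD_MEM_ENCRYPT
		/* Retrieve the modifier (SME encryption mask if SME is active) */
		movq	sme_me_mask(%rip), %rax
	#else
		xorq	%rax, %rax
	#endif

		/* Form the CR3 value being sure to include the CR3 modifier */
		addq	$(init_top_pgt - __START_KERNEL_map), %rax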
Signed-off-by: Khalid Ali <khaliidcaliy@...il.com>
Reported-by: kernel test robot <lkp@...el.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202506171012.Ji3c5sJh-lkp@intel.com/
---
 arch/x86/boot/startup/map_kernel.c | 11 +++--------
 arch/x86/include/asm/setup.h       |  2 +-
 arch/x86/kernel/head_64.S          | 10 ++++------
 3 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
index 332dbe6688c4..6fdb340e9147 100644
--- a/arch/x86/boot/startup/map_kernel.c
+++ b/arch/x86/boot/startup/map_kernel.c
@@ -30,7 +30,7 @@ static inline bool check_la57_support(void)
 	return true;
 }
 
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+static void __head sme_postprocess_startup(struct boot_params *bp,
 						    pmdval_t *pmd,
 						    unsigned long p2v_offset)
 {
@@ -68,11 +68,6 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
 		}
 	}
 
-	/*
-	 * Return the SME encryption mask (if SME is active) to be used as a
-	 * modifier for the initial pgdir entry programmed into CR3.
-	 */
-	return sme_get_me_mask();
 }
 
 /*
@@ -84,7 +79,7 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
  * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
  * subtracting p2v_offset from the RIP-relative address.
  */
-unsigned long __head __startup_64(unsigned long p2v_offset,
+void __head __startup_64(unsigned long p2v_offset,
 				  struct boot_params *bp)
 {
 	pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
@@ -213,5 +208,5 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
 	for (; i < PTRS_PER_PMD; i++)
 		pmd[i] &= ~_PAGE_PRESENT;
 
-	return sme_postprocess_startup(bp, pmd, p2v_offset);
+	sme_postprocess_startup(bp, pmd, p2v_offset);
 }
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 692af46603a1..29ea24bb85ff 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -50,7 +50,7 @@ extern unsigned long acpi_realmode_flags;
 
 extern void reserve_standard_io_resources(void);
 extern void i386_reserve_resources(void);
-extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
+extern void __startup_64(unsigned long p2v_offset, struct boot_params *bp);
 extern void startup_64_setup_gdt_idt(void);
 extern void startup_64_load_idt(void *vc_handler);
 extern void early_setup_idt(void);
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3e9b3a3bd039..4390a28f7dad 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -106,18 +106,16 @@ SYM_CODE_START_NOALIGN(startup_64)
 
 	/*
 	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
-	 * the kernel and retrieve the modifier (SME encryption mask if SME
-	 * is active) to be added to the initial pgdir entry that will be
-	 * programmed into CR3.
+	 * the kernel.
 	 */
 	movq	%r15, %rsi
 	call	__startup_64
 
 	/* Form the CR3 value being sure to include the CR3 modifier */
-	leaq	early_top_pgt(%rip), %rcx
-	addq	%rcx, %rax
-
+	leaq	early_top_pgt(%rip), %rax
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
+	addq	sme_me_mask(%rip), %rax
 	mov	%rax, %rdi
 
 	/*
--
2.49.0