Message-ID: <176191333027.2601451.11257491536675159729.tip-bot2@tip-bot2>
Date: Fri, 31 Oct 2025 12:22:10 -0000
From: "tip-bot2 for John Allen" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: John Allen <john.allen@....com>, "Borislav Petkov (AMD)" <bp@...en8.de>,
Tom Lendacky <thomas.lendacky@....com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: x86/sev] x86/boot: Move boot_*msr helpers to asm/shared/msr.h
The following commit has been merged into the x86/sev branch of tip:
Commit-ID: 9249bcdea0c6db4f450a9267aa6da5b4dd4153ca
Gitweb: https://git.kernel.org/tip/9249bcdea0c6db4f450a9267aa6da5b4dd4153ca
Author: John Allen <john.allen@....com>
AuthorDate: Wed, 24 Sep 2025 20:08:51
Committer: Borislav Petkov (AMD) <bp@...en8.de>
CommitterDate: Thu, 30 Oct 2025 16:29:53 +01:00
x86/boot: Move boot_*msr helpers to asm/shared/msr.h
The boot_{rdmsr,wrmsr}() helpers are *just* the bare-bones MSR access
functionality, without any of the tracing or exception handling glue that the
kernel proper adds.
Move these helpers to asm/shared/msr.h and rename to raw_{rdmsr,wrmsr}() to
indicate what they are.
[ bp: Correct the reason why those helpers exist. I should've caught that in
the original patch that added them:
176db622573f ("x86/boot: Introduce helpers for MSR reads/writes")
but oh well...
- fixup include path delimiters to <> ]
Signed-off-by: John Allen <john.allen@....com>
Signed-off-by: Borislav Petkov (AMD) <bp@...en8.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@....com>
Link: https://patch.msgid.link/all/20250924200852.4452-2-john.allen@amd.com
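[ Editor's note: for illustration only, not part of the patch. A minimal
  sketch of how a decompressor-stage caller uses the renamed read helper;
  it assumes <asm/shared/msr.h> and <asm/msr-index.h> are already included,
  and the function name is made up. The MSR and bit check mirror sev_enable()
  in the first hunk below.

  static bool example_sev_enabled(void)
  {
  	struct msr m;

  	raw_rdmsr(MSR_AMD64_SEV, &m);		/* plain RDMSR: EDX:EAX -> m.h/m.l */
  	return m.q & MSR_AMD64_SEV_ENABLED;	/* combined 64-bit view via the union */
  }
]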
---
arch/x86/boot/compressed/sev.c | 7 ++++---
arch/x86/boot/compressed/sev.h | 6 +++---
arch/x86/boot/cpucheck.c | 16 ++++++++--------
arch/x86/boot/msr.h | 26 --------------------------
arch/x86/include/asm/shared/msr.h | 15 +++++++++++++++
5 files changed, 30 insertions(+), 40 deletions(-)
delete mode 100644 arch/x86/boot/msr.h
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 6e5c32a..c8c1464 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -14,6 +14,7 @@
#include <asm/bootparam.h>
#include <asm/pgtable_types.h>
+#include <asm/shared/msr.h>
#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
@@ -397,7 +398,7 @@ void sev_enable(struct boot_params *bp)
}
/* Set the SME mask if this is an SEV guest. */
- boot_rdmsr(MSR_AMD64_SEV, &m);
+ raw_rdmsr(MSR_AMD64_SEV, &m);
sev_status = m.q;
if (!(sev_status & MSR_AMD64_SEV_ENABLED))
return;
@@ -446,7 +447,7 @@ u64 sev_get_status(void)
if (sev_check_cpu_support() < 0)
return 0;
- boot_rdmsr(MSR_AMD64_SEV, &m);
+ raw_rdmsr(MSR_AMD64_SEV, &m);
return m.q;
}
@@ -496,7 +497,7 @@ bool early_is_sevsnp_guest(void)
struct msr m;
/* Obtain the address of the calling area to use */
- boot_rdmsr(MSR_SVSM_CAA, &m);
+ raw_rdmsr(MSR_SVSM_CAA, &m);
boot_svsm_caa_pa = m.q;
/*
diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
index 92f79c2..22637b4 100644
--- a/arch/x86/boot/compressed/sev.h
+++ b/arch/x86/boot/compressed/sev.h
@@ -10,7 +10,7 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
-#include "../msr.h"
+#include <asm/shared/msr.h>
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 sev_get_status(void);
@@ -20,7 +20,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
{
struct msr m;
- boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+ raw_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
return m.q;
}
@@ -30,7 +30,7 @@ static inline void sev_es_wr_ghcb_msr(u64 val)
struct msr m;
m.q = val;
- boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+ raw_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
}
#else
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index f82de8d..2e1bb93 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -26,9 +26,9 @@
#include <asm/intel-family.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
+#include <asm/shared/msr.h>
#include "string.h"
-#include "msr.h"
static u32 err_flags[NCAPINTS];
@@ -134,9 +134,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
struct msr m;
- boot_rdmsr(MSR_K7_HWCR, &m);
+ raw_rdmsr(MSR_K7_HWCR, &m);
m.l &= ~(1 << 15);
- boot_wrmsr(MSR_K7_HWCR, &m);
+ raw_wrmsr(MSR_K7_HWCR, &m);
get_cpuflags(); /* Make sure it really did something */
err = check_cpuflags();
@@ -148,9 +148,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
struct msr m;
- boot_rdmsr(MSR_VIA_FCR, &m);
+ raw_rdmsr(MSR_VIA_FCR, &m);
m.l |= (1 << 1) | (1 << 7);
- boot_wrmsr(MSR_VIA_FCR, &m);
+ raw_wrmsr(MSR_VIA_FCR, &m);
set_bit(X86_FEATURE_CX8, cpu.flags);
err = check_cpuflags();
@@ -160,14 +160,14 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
struct msr m, m_tmp;
u32 level = 1;
- boot_rdmsr(0x80860004, &m);
+ raw_rdmsr(0x80860004, &m);
m_tmp = m;
m_tmp.l = ~0;
- boot_wrmsr(0x80860004, &m_tmp);
+ raw_wrmsr(0x80860004, &m_tmp);
asm("cpuid"
: "+a" (level), "=d" (cpu.flags[0])
: : "ecx", "ebx");
- boot_wrmsr(0x80860004, &m);
+ raw_wrmsr(0x80860004, &m);
err = check_cpuflags();
} else if (err == 0x01 &&
diff --git a/arch/x86/boot/msr.h b/arch/x86/boot/msr.h
deleted file mode 100644
index aed66f7..0000000
--- a/arch/x86/boot/msr.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Helpers/definitions related to MSR access.
- */
-
-#ifndef BOOT_MSR_H
-#define BOOT_MSR_H
-
-#include <asm/shared/msr.h>
-
-/*
- * The kernel proper already defines rdmsr()/wrmsr(), but they are not for the
- * boot kernel since they rely on tracepoint/exception handling infrastructure
- * that's not available here.
- */
-static inline void boot_rdmsr(unsigned int reg, struct msr *m)
-{
- asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
-}
-
-static inline void boot_wrmsr(unsigned int reg, const struct msr *m)
-{
- asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
-}
-
-#endif /* BOOT_MSR_H */
diff --git a/arch/x86/include/asm/shared/msr.h b/arch/x86/include/asm/shared/msr.h
index 1e6ec10..a20b1c0 100644
--- a/arch/x86/include/asm/shared/msr.h
+++ b/arch/x86/include/asm/shared/msr.h
@@ -12,4 +12,19 @@ struct msr {
};
};
+/*
+ * The kernel proper already defines rdmsr()/wrmsr(), but they are not for the
+ * boot kernel since they rely on tracepoint/exception handling infrastructure
+ * that's not available here.
+ */
+static inline void raw_rdmsr(unsigned int reg, struct msr *m)
+{
+ asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
+}
+
+static inline void raw_wrmsr(unsigned int reg, const struct msr *m)
+{
+ asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
+}
+
#endif /* _ASM_X86_SHARED_MSR_H */
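[ Editor's note: for completeness, a minimal usage sketch of the write side,
  not part of the patch. It mirrors the read-modify-write pattern check_cpu()
  uses above; the function name is hypothetical and the same includes are
  assumed.

  static void example_clear_hwcr_bit(void)
  {
  	struct msr m;

  	raw_rdmsr(MSR_K7_HWCR, &m);	/* EAX -> m.l, EDX -> m.h */
  	m.l &= ~(1 << 15);		/* clear bit 15 in the low half, as check_cpu() does */
  	raw_wrmsr(MSR_K7_HWCR, &m);	/* m.l/m.h go back into EAX/EDX for WRMSR */
  }
]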