Message-ID: <80fbaaba88d07541bed458dc01d12cdd6623086e.1709922929.git.thomas.lendacky@amd.com>
Date: Fri, 8 Mar 2024 12:35:22 -0600
From: Tom Lendacky <thomas.lendacky@....com>
To: <linux-kernel@...r.kernel.org>, <x86@...nel.org>
CC: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>, Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>, Andy Lutomirski <luto@...nel.org>, "Peter
Zijlstra" <peterz@...radead.org>, Dan Williams <dan.j.williams@...el.com>,
Michael Roth <michael.roth@....com>, Ashish Kalra <ashish.kalra@....com>
Subject: [PATCH v2 07/14] x86/sev: Provide SVSM discovery support

The SVSM specification documents an alternative method of SVSM discovery
that uses a reserved CPUID bit and a reserved MSR.

For the CPUID support, the SNP CPUID table is updated to set bit 28 of
the EAX register of the 0x8000001f leaf when an SVSM is present. This bit
has been reserved for use in this capacity.
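
As an illustrative sketch only (not part of this patch), a guest
component could test for the bit using the compiler-provided
__get_cpuid() helper from <cpuid.h>; the helper name svsm_present()
is made up for this example:

        #include <cpuid.h>
        #include <stdbool.h>

        static bool svsm_present(void)
        {
                unsigned int eax, ebx, ecx, edx;

                /* Leaf 0x8000001f: AMD memory encryption features */
                if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
                        return false;

                return eax & (1U << 28);  /* bit 28: SVSM present */
        }
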
For the MSR support, a new reserved MSR 0xc001f000 has been defined.
Accesses to this MSR generate a #VC. The #VC handler is expected to
ignore writes to this MSR and to return the physical calling area
address (CAA) on reads.
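
Again as an illustrative sketch (the function name is made up; it
relies only on the MSR semantics described above), this is what a raw
RDMSR of the new MSR would look like from inside the guest:

        /*
         * RDMSR of MSR 0xc001f000 raises a #VC inside the guest; the
         * #VC handler completes it by returning the CAA physical
         * address in EDX:EAX. Writes to the MSR are silently ignored.
         */
        static inline unsigned long long svsm_get_caa_pa(void)
        {
                unsigned int lo, hi;

                asm volatile("rdmsr"
                             : "=a" (lo), "=d" (hi)
                             : "c" (0xc001f000));

                return ((unsigned long long)hi << 32) | lo;
        }
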
Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
---
arch/x86/include/asm/cpufeatures.h |  1 +
arch/x86/include/asm/msr-index.h   |  2 ++
arch/x86/kernel/sev-shared.c       | 11 +++++++++++
arch/x86/kernel/sev.c              | 17 +++++++++++++++++
4 files changed, 31 insertions(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0343caa016a9..a07ce7b4ed0d 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -446,6 +446,7 @@
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */
+#define X86_FEATURE_SVSM_PRESENT (19*32+28) /* "" SNP SVSM is present */

/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 24c575cdd6b9..7464f7ae6683 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -646,6 +646,8 @@
#define MSR_AMD64_RMP_BASE 0xc0010132
#define MSR_AMD64_RMP_END 0xc0010133

+#define MSR_SVSM_CAA 0xc001f000
+
/* AMD Collaborative Processor Performance Control MSRs */
#define MSR_AMD_CPPC_CAP1 0xc00102b0
#define MSR_AMD_CPPC_ENABLE 0xc00102b1
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 46dcc0af7d4b..07ad088337e1 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1559,6 +1559,8 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
static void __init setup_svsm_ca(const struct cc_blob_sev_info *cc_info)
{
struct snp_secrets_page_layout *secrets_page;
+ struct snp_cpuid_table *cpuid_table;
+ unsigned int i;
u64 caa;

BUILD_BUG_ON(sizeof(*secrets_page) != PAGE_SIZE);
@@ -1596,4 +1598,13 @@ static void __init setup_svsm_ca(const struct cc_blob_sev_info *cc_info)
*/
boot_svsm_caa = (struct svsm_ca *)caa;
boot_svsm_caa_pa = caa;
+
+ /* Advertise the SVSM presence via CPUID. */
+ cpuid_table = (struct snp_cpuid_table *)snp_cpuid_get_table();
+ for (i = 0; i < cpuid_table->count; i++) {
+ struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
+
+ if (fn->eax_in == 0x8000001f)
+ fn->eax |= BIT(28);
+ }
}
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index e11cb61f0d1a..b85f1b455eb8 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1347,12 +1347,29 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
return 0;
}

+static enum es_result vc_handle_svsm_caa_msr(struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+
+ /* Writes to the SVSM CAA MSR are ignored (WRMSR is opcode 0x0f 0x30) */
+ if (ctxt->insn.opcode.bytes[1] == 0x30)
+ return ES_OK;
+
+ regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
+ regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
+
+ return ES_OK;
+}
+
static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
struct pt_regs *regs = ctxt->regs;
enum es_result ret;
u64 exit_info_1;

+ if (regs->cx == MSR_SVSM_CAA)
+ return vc_handle_svsm_caa_msr(ctxt);
+
/* Is it a WRMSR? */
exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;

--
2.43.2