Message-ID: <20250807165950.14953-2-kim.phillips@amd.com>
Date: Thu, 7 Aug 2025 11:59:50 -0500
From: Kim Phillips <kim.phillips@....com>
To: <linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>,
<linux-coco@...ts.linux.dev>, <x86@...nel.org>
CC: Peter Zijlstra <peterz@...radead.org>, Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>, Dave Hansen
<dave.hansen@...ux.intel.com>, Sean Christopherson <seanjc@...gle.com>,
"Paolo Bonzini" <pbonzini@...hat.com>, Ingo Molnar <mingo@...hat.com>, "H.
Peter Anvin" <hpa@...or.com>, Thomas Gleixner <tglx@...utronix.de>, K Prateek
Nayak <kprateek.nayak@....com>, "Nikunj A . Dadhania" <nikunj@....com>, "Tom
Lendacky" <thomas.lendacky@....com>, Michael Roth <michael.roth@....com>,
Ashish Kalra <ashish.kalra@....com>, Borislav Petkov
<borislav.petkov@....com>, Borislav Petkov <bp@...en8.de>, Nathan Fontenot
<nathan.fontenot@....com>, Dhaval Giani <Dhaval.Giani@....com>, "Santosh
Shukla" <santosh.shukla@....com>, Naveen Rao <naveen.rao@....com>, "Gautham R
. Shenoy" <gautham.shenoy@....com>, Ananth Narayan <ananth.narayan@....com>,
Pankaj Gupta <pankaj.gupta@....com>, David Kaplan <david.kaplan@....com>,
"Jon Grimm" <Jon.Grimm@....com>, Kim Phillips <kim.phillips@....com>
Subject: [RFC PATCH 1/1] KVM: SEV: Add support for SMT Protection

Add the new CPUID bit that indicates available hardware support:
CPUID_Fn8000001F_EAX [AMD Secure Encryption EAX] bit 25.
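
For illustration only, not part of this patch: the presence bit can be
probed directly with CPUID, e.g. from a standalone userspace tool.  The
helper name below is hypothetical; the leaf and bit position are the
ones quoted above.

  #include <stdbool.h>
  #include <cpuid.h>	/* gcc/clang __get_cpuid() wrapper */

  /* Hypothetical probe of CPUID_Fn8000001F_EAX bit 25. */
  static bool smt_protection_supported(void)
  {
  	unsigned int eax, ebx, ecx, edx;

  	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
  		return false;

  	return eax & (1u << 25);
  }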

Indicate support for SEV_FEATURES bit 15 (SmtProtection), which an SNP
guest sets to enable the feature.
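
As a sketch of where that bit lives, and not code from this patch: for
SNP, SEV_FEATURES is a field of the vCPU's VMSA (struct
sev_es_save_area in arch/x86/include/asm/svm.h) and is measured as part
of the launch state.  A VMM-side helper might look like this
(enable_smt_protection() is hypothetical):

  /* Hypothetical: request SMT Protection in an initial SNP VMSA. */
  static void enable_smt_protection(struct sev_es_save_area *vmsa)
  {
  	/* SEV_FEATURES is covered by the SNP launch measurement. */
  	vmsa->sev_features |= SVM_SEV_FEAT_SMT_PROTECTION;	/* BIT(15) */
  }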

Handle the new "IDLE_REQUIRED" VMRUN exit code, which indicates that
the hardware has detected that the vCPU's sibling thread is not idle.
When IDLE_REQUIRED is returned, simply resume the guest.  Ideally this
would be optimized to rendezvous with sibling idle-state transitions.
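
One possible shape of that optimization, purely as a sketch of the idea
(idle_cpu() is the scheduler's existing query helper; none of this is
in the patch below):

  /*
   * Hypothetical rendezvous before re-entering the guest: wait for
   * the sibling pCPU to go idle instead of bouncing through repeated
   * IDLE_REQUIRED exits.
   */
  static void wait_for_sibling_idle(unsigned int sibling)
  {
  	while (!idle_cpu(sibling))
  		cpu_relax();
  }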

Program the new HLT_WAKEUP_ICR MSR on each pCPU with its sibling's
APIC ID.  This enables hardware/microcode to 'kick' the pCPU running
the vCPU when its sibling needs to process a pending interrupt.
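
For reference, the value written by the sev.c hunk below decomposes as
follows; the field layout is inferred from the code itself, so consult
the APM section cited below for the authoritative definition:

  /* Sketch: HLT_WAKEUP_ICR = wakeup vector | sibling APIC ID << 32. */
  static u64 make_hlt_wakeup_icr(unsigned int sibling)
  {
  	return LOCAL_TIMER_VECTOR |	/* wakeup vector, low bits */
  	       ((u64)per_cpu(x86_cpu_to_apicid, sibling) << 32);
  }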

For more information, see "15.36.17 Side-Channel Protection",
"SMT Protection", in "AMD64 Architecture Programmer's Manual Volume 2:
System Programming Part 2", Pub. 24593 Rev. 3.42, March 2024,
available here:

  https://bugzilla.kernel.org/attachment.cgi?id=306250

Signed-off-by: Kim Phillips <kim.phillips@....com>
---
 arch/x86/include/asm/cpufeatures.h |  1 +
 arch/x86/include/asm/msr-index.h   |  1 +
 arch/x86/include/asm/svm.h         |  1 +
 arch/x86/include/uapi/asm/svm.h    |  1 +
 arch/x86/kvm/svm/sev.c             | 17 +++++++++++++++++
 arch/x86/kvm/svm/svm.c             |  3 +++
 6 files changed, 24 insertions(+)
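
Aside, not part of this patch: an SNP guest normally sees its active
SEV features in the SEV_STATUS MSR (MSR_AMD64_SEV, 0xc0010131), where
SEV_FEATURES bit n has so far surfaced at bit n+2 (e.g. DebugSwap,
bit 5 -> bit 7).  Assuming SmtProtection follows that pattern, a
guest-side check might look like:

  /* Hypothetical: assumes SEV_FEATURES bit 15 -> SEV_STATUS bit 17. */
  static bool snp_smt_protection_active(void)
  {
  	u64 status;

  	rdmsrq(MSR_AMD64_SEV, status);
  	return status & BIT_ULL(17);
  }
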
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 286d509f9363..4536fe40f5aa 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -448,6 +448,7 @@
 #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* "debug_swap" SEV-ES full debug state swap support */
 #define X86_FEATURE_RMPREAD		(19*32+21) /* RMPREAD instruction */
 #define X86_FEATURE_SEGMENTED_RMP	(19*32+23) /* Segmented RMP support */
+#define X86_FEATURE_SMT_PROTECTION	(19*32+25) /* SEV-SNP SMT Protection */
 #define X86_FEATURE_ALLOWED_SEV_FEATURES (19*32+27) /* Allowed SEV Features */
 #define X86_FEATURE_SVSM		(19*32+28) /* "svsm" SVSM present */
 #define X86_FEATURE_HV_INUSE_WR_ALLOWED	(19*32+30) /* Allow Write to in-use hypervisor-owned pages */

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c29127ac626a..a75999a93c3f 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -707,6 +707,7 @@
 #define MSR_AMD64_SEG_RMP_ENABLED_BIT	0
 #define MSR_AMD64_SEG_RMP_ENABLED	BIT_ULL(MSR_AMD64_SEG_RMP_ENABLED_BIT)
 #define MSR_AMD64_RMP_SEGMENT_SHIFT(x)	(((x) & GENMASK_ULL(13, 8)) >> 8)
+#define MSR_AMD64_HLT_WAKEUP_ICR	0xc0010137
 
 #define MSR_SVSM_CAA			0xc001f000

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index ffc27f676243..251cead18681 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -299,6 +299,7 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
 #define SVM_SEV_FEAT_RESTRICTED_INJECTION	BIT(3)
 #define SVM_SEV_FEAT_ALTERNATE_INJECTION	BIT(4)
 #define SVM_SEV_FEAT_DEBUG_SWAP		BIT(5)
+#define SVM_SEV_FEAT_SMT_PROTECTION		BIT(15)
 
 #define VMCB_ALLOWED_SEV_FEATURES_VALID	BIT_ULL(63)

diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 9c640a521a67..7b81ee574c55 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -126,6 +126,7 @@
 	/* SW_EXITINFO1[11:4] */				\
 	((((u64)reason_code) & 0xff) << 4))
 #define SVM_VMGEXIT_UNSUPPORTED_EVENT		0x8000ffff
+#define SVM_VMGEXIT_IDLE_REQUIRED		0xfffffffd
 
 /* Exit code reserved for hypervisor/software use */
 #define SVM_EXIT_SW				0xf0000000

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 2fbdebf79fbb..5f2605bd265f 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3082,6 +3082,23 @@ void __init sev_hardware_setup(void)
 	sev_supported_vmsa_features = 0;
 	if (sev_es_debug_swap_enabled)
 		sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
+
+	if (sev_snp_enabled && cpu_feature_enabled(X86_FEATURE_SMT_PROTECTION)) {
+		unsigned long long hlt_wakeup_icr;
+		unsigned int cpu, sibling;
+
+		sev_supported_vmsa_features |= SVM_SEV_FEAT_SMT_PROTECTION;
+
+		for_each_online_cpu(cpu) {
+			for_each_cpu(sibling, topology_sibling_cpumask(cpu)) {
+				if (sibling == cpu)
+					continue;
+				hlt_wakeup_icr = LOCAL_TIMER_VECTOR | (unsigned long long)
+						 per_cpu(x86_cpu_to_apicid, sibling) << 32;
+				wrmsrq_safe_on_cpu(cpu, MSR_AMD64_HLT_WAKEUP_ICR, hlt_wakeup_icr);
+			}
+		}
+	}
 }
 
 void sev_hardware_unsetup(void)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d9931c6c4bc6..708c5e939b0d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3502,6 +3502,9 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
 
 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
 {
+	if (exit_code == SVM_VMGEXIT_IDLE_REQUIRED)
+		return 1; /* resume guest */
+
 	if (!svm_check_exit_valid(exit_code))
 		return svm_handle_invalid_exit(vcpu, exit_code);

--
2.43.0