Message-ID: <20250502223456.887618-1-seanjc@google.com>
Date: Fri, 2 May 2025 15:34:56 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Michael Larabel <Michael@...haellarabel.com>, Borislav Petkov <bp@...en8.de>
Subject: [PATCH] KVM: SVM: Set/clear SRSO's BP_SPEC_REDUCE on 0 <=> 1 VM count transitions

Set the magic BP_SPEC_REDUCE bit to mitigate SRSO when running VMs if and
only if KVM has at least one active VM. Leaving the bit set at all times
unfortunately degrades performance by a wee bit more than expected.

Use a dedicated spinlock and counter instead of hooking virtualization
enablement, as changing the behavior of kvm.enable_virt_at_load based on
SRSO_BP_SPEC_REDUCE is painful, and has its own drawbacks, e.g. could
result in performance issues for flows that are sensitive to VM creation
latency.

Similarly, don't bother optimizing the 1=>N and N=>1 transitions, e.g. by
using atomic_inc_return() to avoid taking the spinlock, as ensuring that
BP_SPEC_REDUCE is guaranteed to be set before KVM_RUN is non-trivial. KVM
already serializes VM creation against kvm_lock (to add the VM to vm_list),
and the spinlock will only be held for a handful of cycles for the 1<=>N
cases. I.e. the complexity needed to ensure correctness outweighs the
marginal benefits of eliding the lock. See the Link for details.

Link: https://lore.kernel.org/all/aBOnzNCngyS_pQIW@google.com
Fixes: 8442df2b49ed ("x86/bugs: KVM: Add support for SRSO_MSR_FIX")
Reported-by: Michael Larabel <Michael@...haellarabel.com>
Closes: https://www.phoronix.com/review/linux-615-amd-regression
Cc: Borislav Petkov <bp@...en8.de>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
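For reference, a minimal user-space sketch of the 0 <=> 1 counting pattern
described above, with a pthread mutex standing in for the spinlock and a
hypothetical toggle_knob() standing in for the on_each_cpu() MSR write (the
names here are illustrative, not KVM or kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t srso_lock = PTHREAD_MUTEX_INITIALIZER;
static int srso_nr_vms;

static void toggle_knob(bool set)
{
	/* Stand-in for on_each_cpu(svm_toggle_srso_spec_reduce, ...). */
	printf("knob %s\n", set ? "set" : "cleared");
}

static void srso_add_remove_vm(int count)
{
	bool set;

	pthread_mutex_lock(&srso_lock);

	/* Only a 0 => 1 transition can require setting the knob... */
	set = !srso_nr_vms;
	srso_nr_vms += count;

	/*
	 * ...and only an N => 0 transition can require clearing it.  For
	 * the 1 => N and N => 1 cases, neither condition fires and the
	 * lock is held for only a handful of cycles.
	 */
	if (set || !srso_nr_vms)
		toggle_knob(set);

	pthread_mutex_unlock(&srso_lock);
}

int main(void)
{
	srso_add_remove_vm(1);	/* 0 => 1: knob set */
	srso_add_remove_vm(1);	/* 1 => 2: no toggle */
	srso_add_remove_vm(-1);	/* 2 => 1: no toggle */
	srso_add_remove_vm(-1);	/* 1 => 0: knob cleared */
	return 0;
}
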
arch/x86/kvm/svm/svm.c | 43 ++++++++++++++++++++++++++++++++++++------
1 file changed, 37 insertions(+), 6 deletions(-)
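
And a deliberately racy, lock-free variant (NOT what the patch does) to
illustrate why atomic_inc_return() alone can't guarantee BP_SPEC_REDUCE is
set before KVM_RUN; toggle_knob() is the same hypothetical stand-in as in
the sketch above:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int srso_nr_vms;

static void toggle_knob(bool set)
{
	/* Stand-in for the cross-CPU MSR update; assume it is slow. */
	printf("knob %s\n", set ? "set" : "cleared");
}

static void racy_vm_init(void)
{
	/* Only the 0 => 1 winner does the (slow) toggle. */
	if (atomic_fetch_add(&srso_nr_vms, 1) == 0)
		toggle_knob(true);

	/*
	 * Race: a second VM creator can observe a non-zero count, skip
	 * the toggle, and reach KVM_RUN before the first creator's
	 * toggle_knob() has completed on all CPUs, i.e. the VM can run
	 * without the mitigation in place.
	 */
}

int main(void)
{
	racy_vm_init();	/* single-threaded here; the race needs two creators */
	return 0;
}
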
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index cc1c721ba067..364959fd1040 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -607,9 +607,6 @@ static void svm_disable_virtualization_cpu(void)
 	kvm_cpu_svm_disable();
 
 	amd_pmu_disable_virt();
-
-	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
-		msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
 }
 
 static int svm_enable_virtualization_cpu(void)
@@ -687,9 +684,6 @@ static int svm_enable_virtualization_cpu(void)
 		rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
 	}
 
-	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
-		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
-
 	return 0;
 }
 
@@ -5032,10 +5026,46 @@ static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	sev_vcpu_deliver_sipi_vector(vcpu, vector);
 }
 
+#ifdef CONFIG_CPU_MITIGATIONS
+static DEFINE_SPINLOCK(srso_lock);
+static int srso_nr_vms;
+
+static void svm_toggle_srso_spec_reduce(void *set)
+{
+	if (set)
+		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+	else
+		msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+}
+
+static void svm_srso_add_remove_vm(int count)
+{
+	bool set;
+
+	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+		return;
+
+	guard(spinlock)(&srso_lock);
+
+	set = !srso_nr_vms;
+	srso_nr_vms += count;
+
+	WARN_ON_ONCE(srso_nr_vms < 0);
+	if (!set && srso_nr_vms)
+		return;
+
+	on_each_cpu(svm_toggle_srso_spec_reduce, (void *)set, 1);
+}
+#else
+static void svm_srso_add_remove_vm(int count) { }
+#endif
+
 static void svm_vm_destroy(struct kvm *kvm)
 {
 	avic_vm_destroy(kvm);
 	sev_vm_destroy(kvm);
+
+	svm_srso_add_remove_vm(-1);
 }
 
 static int svm_vm_init(struct kvm *kvm)
@@ -5061,6 +5091,7 @@ static int svm_vm_init(struct kvm *kvm)
 			return ret;
 	}
 
+	svm_srso_add_remove_vm(1);
 	return 0;
 }
 
base-commit: 45eb29140e68ffe8e93a5471006858a018480a45
--
2.49.0.906.g1f30a19c02-goog