Message-ID: <174065112507.10177.1542837749277153210.tip-bot2@tip-bot2>
Date: Thu, 27 Feb 2025 10:12:05 -0000
From: "tip-bot2 for Yosry Ahmed" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Yosry Ahmed <yosry.ahmed@...ux.dev>, Ingo Molnar <mingo@...nel.org>,
Josh Poimboeuf <jpoimboe@...nel.org>, Sean Christopherson <seanjc@...gle.com>,
x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: x86/bugs] x86/bugs: Move the X86_FEATURE_USE_IBPB check into callers

The following commit has been merged into the x86/bugs branch of tip:
Commit-ID: 549435aab49ae83d60a08795de6cf0e866f3b2ec
Gitweb: https://git.kernel.org/tip/549435aab49ae83d60a08795de6cf0e866f3b2ec
Author: Yosry Ahmed <yosry.ahmed@...ux.dev>
AuthorDate: Thu, 27 Feb 2025 01:27:07
Committer: Ingo Molnar <mingo@...nel.org>
CommitterDate: Thu, 27 Feb 2025 10:57:20 +01:00
x86/bugs: Move the X86_FEATURE_USE_IBPB check into callers
indirect_branch_prediction_barrier() only performs the MSR write if
X86_FEATURE_USE_IBPB is set, using alternative_msr_write(). In
preparation for removing X86_FEATURE_USE_IBPB, move the feature check
into the callers so that they can be addressed one-by-one, and use
X86_FEATURE_IBPB instead to guard the MSR write.
Signed-off-by: Yosry Ahmed <yosry.ahmed@...ux.dev>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
Acked-by: Josh Poimboeuf <jpoimboe@...nel.org>
Acked-by: Sean Christopherson <seanjc@...gle.com>
Link: https://lore.kernel.org/r/20250227012712.3193063-2-yosry.ahmed@linux.dev
---
arch/x86/include/asm/nospec-branch.h | 2 +-
arch/x86/kernel/cpu/bugs.c | 2 +-
arch/x86/kvm/svm/svm.c | 3 ++-
arch/x86/kvm/vmx/nested.c | 3 ++-
arch/x86/kvm/vmx/vmx.c | 3 ++-
arch/x86/mm/tlb.c | 7 ++++---
6 files changed, 12 insertions(+), 8 deletions(-)
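
For readers skimming the diff, here is a minimal sketch of the pattern being changed (illustrative only, simplified from the kernel code touched below; it is not part of the patch itself):

	/*
	 * Before: the mitigation check lives inside the helper.
	 * alternative_msr_write() only performs the WRMSR when the given
	 * feature bit (X86_FEATURE_USE_IBPB) is set.
	 */
	static inline void indirect_branch_prediction_barrier(void)
	{
		alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd,
				      X86_FEATURE_USE_IBPB);
	}

	/*
	 * After: the helper is guarded only by CPU support (X86_FEATURE_IBPB),
	 * and each call site explicitly checks whether the mitigation is in use:
	 */
	if (cpu_feature_enabled(X86_FEATURE_USE_IBPB))
		indirect_branch_prediction_barrier();
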
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 7e8bf78..7cbb76a 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -515,7 +515,7 @@ extern u64 x86_pred_cmd;
static inline void indirect_branch_prediction_barrier(void)
{
- alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
+ alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
}
/* The Intel SPEC CTRL MSR base value cache */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 1d7afc4..754150f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2272,7 +2272,7 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
if (ctrl == PR_SPEC_FORCE_DISABLE)
task_set_spec_ib_force_disable(task);
task_update_spec_tif(task);
- if (task == current)
+ if (task == current && cpu_feature_enabled(X86_FEATURE_USE_IBPB))
indirect_branch_prediction_barrier();
break;
default:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 77ab66c..57222c3 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1565,7 +1565,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (sd->current_vmcb != svm->vmcb) {
sd->current_vmcb = svm->vmcb;
- if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
+ if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT) &&
+ cpu_feature_enabled(X86_FEATURE_USE_IBPB))
indirect_branch_prediction_barrier();
}
if (kvm_vcpu_apicv_active(vcpu))
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8a7af02..1df427a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5026,7 +5026,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
* doesn't isolate different VMCSs, i.e. in this case, doesn't provide
* separate modes for L2 vs L1.
*/
- if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+ cpu_feature_enabled(X86_FEATURE_USE_IBPB))
indirect_branch_prediction_barrier();
/* Update any VMCS fields that might have changed while L2 ran */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6c56d52..042b7a8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1477,7 +1477,8 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
* performs IBPB on nested VM-Exit (a single nested transition
* may switch the active VMCS multiple times).
*/
- if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
+ if (cpu_feature_enabled(X86_FEATURE_USE_IBPB) &&
+ (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)))
indirect_branch_prediction_barrier();
}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6cf881a..4f61d11 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -437,7 +437,8 @@ static void cond_mitigation(struct task_struct *next)
* both have the IBPB bit set.
*/
if (next_mm != prev_mm &&
- (next_mm | prev_mm) & LAST_USER_MM_IBPB)
+ (next_mm | prev_mm) & LAST_USER_MM_IBPB &&
+ cpu_feature_enabled(X86_FEATURE_USE_IBPB))
indirect_branch_prediction_barrier();
}
@@ -447,8 +448,8 @@ static void cond_mitigation(struct task_struct *next)
* different context than the user space task which ran
* last on this CPU.
*/
- if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
- (unsigned long)next->mm)
+ if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) != (unsigned long)next->mm &&
+ cpu_feature_enabled(X86_FEATURE_USE_IBPB))
indirect_branch_prediction_barrier();
}