Message-Id: <1517187532-32286-4-git-send-email-karahmed@amazon.de>
Date: Mon, 29 Jan 2018 01:58:51 +0100
From: KarimAllah Ahmed <karahmed@...zon.de>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, x86@...nel.org
Cc: Ashok Raj <ashok.raj@...el.com>,
Asit Mallick <asit.k.mallick@...el.com>,
Dave Hansen <dave.hansen@...el.com>,
Arjan Van De Ven <arjan.van.de.ven@...el.com>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrea Arcangeli <aarcange@...hat.com>,
Andi Kleen <ak@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
Dan Williams <dan.j.williams@...el.com>,
Jun Nakajima <jun.nakajima@...el.com>,
Andy Lutomirski <luto@...nel.org>,
Greg KH <gregkh@...uxfoundation.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
David Woodhouse <dwmw@...zon.co.uk>,
KarimAllah Ahmed <karahmed@...zon.de>
Subject: [PATCH v2 3/4] x86/kvm: Add IBPB support
From: Ashok Raj <ashok.raj@...el.com>
Add MSR passthrough for MSR_IA32_PRED_CMD and place branch predictor
barriers when switching between VMs to avoid inter-VM Spectre-v2 attacks.
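
For reference, the barrier itself boils down to writing PRED_CMD_IBPB
into MSR_IA32_PRED_CMD. A minimal sketch of that underlying operation
follows; the real indirect_branch_prediction_barrier() helper is patched
in via alternatives and only runs when the kernel has enabled the
feature, so this is illustrative rather than the exact implementation:

	#include <asm/msr.h>
	#include <asm/msr-index.h>

	/* Illustrative only: flush indirect branch predictions on
	 * this CPU by writing the IBPB command bit to PRED_CMD. */
	static inline void ibpb_sketch(void)
	{
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
	}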
[peterz: rebase and changelog rewrite]
[karahmed: - rebase
- vmx: expose PRED_CMD whenever it is available
- svm: only pass through IBPB if it is available]
Cc: Asit Mallick <asit.k.mallick@...el.com>
Cc: Dave Hansen <dave.hansen@...el.com>
Cc: Arjan Van De Ven <arjan.van.de.ven@...el.com>
Cc: Tim Chen <tim.c.chen@...ux.intel.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Andrea Arcangeli <aarcange@...hat.com>
Cc: Andi Kleen <ak@...ux.intel.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Dan Williams <dan.j.williams@...el.com>
Cc: Jun Nakajima <jun.nakajima@...el.com>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Greg KH <gregkh@...uxfoundation.org>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Signed-off-by: Ashok Raj <ashok.raj@...el.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: http://lkml.kernel.org/r/1515720739-43819-6-git-send-email-ashok.raj@intel.com
Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
Signed-off-by: KarimAllah Ahmed <karahmed@...zon.de>
---
arch/x86/kvm/svm.c | 14 ++++++++++++++
arch/x86/kvm/vmx.c | 4 ++++
2 files changed, 18 insertions(+)
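
A note on the structure of the change below: the barrier is issued only
when a different VMCB (svm) or VMCS (vmx) is loaded on the CPU, so
reloading the same vCPU skips the comparatively expensive IBPB. A hedged
sketch of that per-CPU tracking pattern, using illustrative names rather
than the exact KVM code in the diff:

	/* Illustrative: remember which guest context last ran on this
	 * CPU and only flush the branch predictor when it changes. */
	static DEFINE_PER_CPU(void *, last_guest_ctx);

	static void ibpb_on_ctx_change(void *ctx)
	{
		if (__this_cpu_read(last_guest_ctx) != ctx) {
			__this_cpu_write(last_guest_ctx, ctx);
			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
		}
	}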
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2744b973..c886e46 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -529,6 +529,7 @@ struct svm_cpu_data {
struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
+ struct vmcb *current_vmcb;
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -918,6 +919,9 @@ static void svm_vcpu_init_msrpm(u32 *msrpm)
set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
}
+
+ if (boot_cpu_has(X86_FEATURE_IBPB))
+ set_msr_interception(msrpm, MSR_IA32_PRED_CMD, 1, 1);
}
static void add_msr_offset(u32 offset)
@@ -1706,11 +1710,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
+ /*
+ * The vmcb page can be recycled, causing a false negative in
+ * svm_vcpu_load(). So do a full IBPB now.
+ */
+ indirect_branch_prediction_barrier();
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int i;
if (unlikely(cpu != vcpu->cpu)) {
@@ -1739,6 +1749,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (static_cpu_has(X86_FEATURE_RDTSCP))
wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
+ if (sd->current_vmcb != svm->vmcb) {
+ sd->current_vmcb = svm->vmcb;
+ indirect_branch_prediction_barrier();
+ }
avic_vcpu_load(vcpu, cpu);
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index dac564d..f82a44c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2296,6 +2296,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
+ indirect_branch_prediction_barrier();
}
if (!already_loaded) {
@@ -9613,6 +9614,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_msrs;
msr_bitmap = vmx->vmcs01.msr_bitmap;
+
+ if (boot_cpu_has(X86_FEATURE_IBPB))
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_PRED_CMD, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
--
2.7.4