Message-ID: <20180222170717.GP31483@char.us.oracle.com>
Date: Thu, 22 Feb 2018 12:07:17 -0500
From: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org, x86@...nel.org,
Radim Krčmář <rkrcmar@...hat.com>,
KarimAllah Ahmed <karahmed@...zon.de>,
David Woodhouse <dwmw@...zon.co.uk>,
Jim Mattson <jmattson@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...nel.org>, stable@...r.kernel.org
Subject: Re: [PATCH 1/3] KVM: x86: use native MSR ops for SPEC_CTRL
On Wed, Feb 21, 2018 at 10:41:35PM +0100, Paolo Bonzini wrote:
> Having a paravirt indirect call in the IBRS restore path is not a
> good idea, since we are trying to protect from speculative execution
> of bogus indirect branch targets. It is also slower, so use
> native_wrmsrl on the vmentry path too.
But it gets replaced during patching. As in, once the machine boots,
the assembly changes from:

callq *0xfffflbah

to

wrmsr

? I don't think you need this patch.
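
For context on the paravirt side: with CONFIG_PARAVIRT=y, wrmsrl() goes
through the pv ops table, i.e. it is an indirect call until the call site
is rewritten at boot. A rough sketch of the shape only -- hypothetical
names, not the literal PVOP_* macro expansion:

#include <linux/types.h>

/* Hypothetical, simplified stand-in for the pv_cpu_ops machinery. */
struct pv_cpu_ops_sketch {
        void (*write_msr)(unsigned int msr, u32 low, u32 high);
};

extern struct pv_cpu_ops_sketch pv_ops_sketch;

static inline void wrmsrl_sketch(unsigned int msr, u64 val)
{
        /* Indirect branch through the ops table -- the call site
         * that boot-time patching later rewrites. */
        pv_ops_sketch.write_msr(msr, (u32)val, (u32)(val >> 32));
}

Paolo's concern, per his changelog, is that as long as the restore path
goes through such a call site, it takes exactly the kind of indirect
branch IBRS is meant to guard against.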
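
The native_* helpers from <asm/msr.h> that the patch switches to issue
the instruction directly, with no indirection left to patch or to
speculate through. Roughly -- simplified sketch, the real helpers also
carry tracing hooks and use different names:

static inline void native_wrmsrl_sketch(unsigned int msr, u64 val)
{
        asm volatile("wrmsr"
                     : /* no outputs */
                     : "c" (msr),              /* MSR index in ECX */
                       "a" ((u32)val),         /* low half in EAX */
                       "d" ((u32)(val >> 32))  /* high half in EDX */
                     : "memory");
}

static inline u64 native_rdmsrl_sketch(unsigned int msr)
{
        u32 low, high;

        asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
        return ((u64)high << 32) | low;
}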
>
> Fixes: d28b387fb74da95d69d2615732f50cceb38e9a4d
> Cc: x86@...nel.org
> Cc: Radim Krčmář <rkrcmar@...hat.com>
> Cc: KarimAllah Ahmed <karahmed@...zon.de>
> Cc: David Woodhouse <dwmw@...zon.co.uk>
> Cc: Jim Mattson <jmattson@...gle.com>
> Cc: Thomas Gleixner <tglx@...utronix.de>
> Cc: Ingo Molnar <mingo@...nel.org>
> Cc: stable@...r.kernel.org
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/x86/kvm/svm.c | 7 ++++---
> arch/x86/kvm/vmx.c | 7 ++++---
> 2 files changed, 8 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index b3e488a74828..1598beeda11c 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -49,6 +49,7 @@
> #include <asm/debugreg.h>
> #include <asm/kvm_para.h>
> #include <asm/irq_remapping.h>
> +#include <asm/microcode.h>
> #include <asm/nospec-branch.h>
>
> #include <asm/virtext.h>
> @@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
> * being speculatively taken.
> */
> if (svm->spec_ctrl)
> - wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> + native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
>
> asm volatile (
> "push %%" _ASM_BP "; \n\t"
> @@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
> * save it.
> */
> if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
> - rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
>
> if (svm->spec_ctrl)
> - wrmsrl(MSR_IA32_SPEC_CTRL, 0);
> + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
>
> /* Eliminate branch target predictions from guest mode */
> vmexit_fill_RSB();
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 67b028d8e726..5caeb8dc5bda 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -51,6 +51,7 @@
> #include <asm/apic.h>
> #include <asm/irq_remapping.h>
> #include <asm/mmu_context.h>
> +#include <asm/microcode.h>
> #include <asm/nospec-branch.h>
>
> #include "trace.h"
> @@ -9453,7 +9454,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
> * being speculatively taken.
> */
> if (vmx->spec_ctrl)
> - wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
> + native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
>
> vmx->__launched = vmx->loaded_vmcs->launched;
> asm(
> @@ -9589,10 +9590,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
> * save it.
> */
> if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
> - rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
> + vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
>
> if (vmx->spec_ctrl)
> - wrmsrl(MSR_IA32_SPEC_CTRL, 0);
> + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
>
> /* Eliminate branch target predictions from guest mode */
> vmexit_fill_RSB();
> --
> 1.8.3.1
>
>