Message-ID: <CALMp9eRgzhoyKWvxBjEL=KrRqGcHvMr3qB2cWDakT92xMZusGg@mail.gmail.com>
Date:   Mon, 29 Jan 2018 11:03:04 -0800
From:   Jim Mattson <jmattson@...gle.com>
To:     KarimAllah Ahmed <karahmed@...zon.de>
Cc:     kvm list <kvm@...r.kernel.org>,
        LKML <linux-kernel@...r.kernel.org>,
        "the arch/x86 maintainers" <x86@...nel.org>,
        Asit Mallick <asit.k.mallick@...el.com>,
        Arjan Van De Ven <arjan.van.de.ven@...el.com>,
        Dave Hansen <dave.hansen@...el.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Andrea Arcangeli <aarcange@...hat.com>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Tim Chen <tim.c.chen@...ux.intel.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Dan Williams <dan.j.williams@...el.com>,
        Jun Nakajima <jun.nakajima@...el.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        David Woodhouse <dwmw@...zon.co.uk>,
        Greg KH <gregkh@...uxfoundation.org>,
        Andy Lutomirski <luto@...nel.org>,
        Ashok Raj <ashok.raj@...el.com>
Subject: Re: [PATCH v2 2/4] x86: vmx: Allow direct access to MSR_IA32_SPEC_CTRL

On Sun, Jan 28, 2018 at 4:58 PM, KarimAllah Ahmed <karahmed@...zon.de> wrote:
> Add direct access to MSR_IA32_SPEC_CTRL for guests. This is needed for
> guests that will only mitigate Spectre V2 through IBRS+IBPB and will not
> be using a retpoline+IBPB based approach.
>
> To avoid the overhead of atomically saving and restoring
> MSR_IA32_SPEC_CTRL for guests that do not actually use the MSR, only
> call add_atomic_switch_msr() when a non-zero value is written to it.
>
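(Purely for illustration, a minimal guest-side sketch of the access
pattern this optimizes for. wrmsrl(), MSR_IA32_SPEC_CTRL and
SPEC_CTRL_IBRS are the standard kernel/SDM definitions, not part of this
patch, and the two helper names below are made up for the sketch.)

    #include <asm/msr.h>
    #include <asm/msr-index.h>

    /* A guest mitigating Spectre v2 with IBRS: the first non-zero write
     * below is what makes the host add MSR_IA32_SPEC_CTRL to its atomic
     * switch list. */
    static void guest_kernel_entry_ibrs(void)
    {
            wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
    }

    /* Cleared again e.g. before returning to userspace. A retpoline
     * guest never writes the MSR at all, so it never pays the
     * save/restore cost on VM entry/exit. */
    static void guest_kernel_exit_ibrs(void)
    {
            wrmsrl(MSR_IA32_SPEC_CTRL, 0);
    }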
> Cc: Asit Mallick <asit.k.mallick@...el.com>
> Cc: Arjan Van De Ven <arjan.van.de.ven@...el.com>
> Cc: Dave Hansen <dave.hansen@...el.com>
> Cc: Andi Kleen <ak@...ux.intel.com>
> Cc: Andrea Arcangeli <aarcange@...hat.com>
> Cc: Linus Torvalds <torvalds@...ux-foundation.org>
> Cc: Tim Chen <tim.c.chen@...ux.intel.com>
> Cc: Thomas Gleixner <tglx@...utronix.de>
> Cc: Dan Williams <dan.j.williams@...el.com>
> Cc: Jun Nakajima <jun.nakajima@...el.com>
> Cc: Paolo Bonzini <pbonzini@...hat.com>
> Cc: David Woodhouse <dwmw@...zon.co.uk>
> Cc: Greg KH <gregkh@...uxfoundation.org>
> Cc: Andy Lutomirski <luto@...nel.org>
> Cc: Ashok Raj <ashok.raj@...el.com>
> Signed-off-by: KarimAllah Ahmed <karahmed@...zon.de>
>
> ---
> v2:
> - remove 'host_spec_ctrl' in favor of only a comment (dwmw@).
> - special case writing '0' in SPEC_CTRL to avoid confusing live-migration
>   when the instance never used the MSR (dwmw@).
> - depend on X86_FEATURE_IBRS instead of X86_FEATURE_SPEC_CTRL (dwmw@).
> - add MSR_IA32_SPEC_CTRL to the list of MSRs to save (dropped it by accident).
> ---
>  arch/x86/kvm/cpuid.c |  4 +++-
>  arch/x86/kvm/vmx.c   | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/x86.c   |  1 +
>  3 files changed, 69 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 0099e10..32c0c14 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -70,6 +70,7 @@ u64 kvm_supported_xcr0(void)
>  /* These are scattered features in cpufeatures.h. */
>  #define KVM_CPUID_BIT_AVX512_4VNNIW     2
>  #define KVM_CPUID_BIT_AVX512_4FMAPS     3
> +#define KVM_CPUID_BIT_IBRS              26
>  #define KF(x) bit(KVM_CPUID_BIT_##x)
>
>  int kvm_update_cpuid(struct kvm_vcpu *vcpu)
> @@ -392,7 +393,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
>
>         /* cpuid 7.0.edx*/
>         const u32 kvm_cpuid_7_0_edx_x86_features =
> -               KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS);
> +               KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS) | \
> +               (boot_cpu_has(X86_FEATURE_IBRS) ? KF(IBRS) : 0);
>
>         /* all calls to cpuid_count() should be made on the same cpu */
>         get_cpu();
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index aa8638a..dac564d 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -920,6 +920,8 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
>  static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
>                                             u16 error_code);
>  static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
> +static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
> +                                                         u32 msr, int type);
>
>  static DEFINE_PER_CPU(struct vmcs *, vmxarea);
>  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
> @@ -2007,6 +2009,28 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>         m->host[i].value = host_val;
>  }
>
> +/* do not touch guest_val and host_val if the msr is not found */
> +static int read_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
> +                                 u64 *guest_val, u64 *host_val)
> +{
> +       unsigned i;
> +       struct msr_autoload *m = &vmx->msr_autoload;
> +
> +       for (i = 0; i < m->nr; ++i)
> +               if (m->guest[i].index == msr)
> +                       break;
> +
> +       if (i == m->nr)
> +               return 1;
> +
> +       if (guest_val)
> +               *guest_val = m->guest[i].value;
> +       if (host_val)
> +               *host_val = m->host[i].value;
> +
> +       return 0;
> +}
> +
>  static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
>  {
>         u64 guest_efer = vmx->vcpu.arch.efer;
> @@ -3203,7 +3227,9 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
>   */
>  static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
> +       u64 spec_ctrl = 0;
>         struct shared_msr_entry *msr;
> +       struct vcpu_vmx *vmx = to_vmx(vcpu);
>
>         switch (msr_info->index) {
>  #ifdef CONFIG_X86_64
> @@ -3223,6 +3249,20 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>         case MSR_IA32_TSC:
>                 msr_info->data = guest_read_tsc(vcpu);
>                 break;
> +       case MSR_IA32_SPEC_CTRL:
> +               if (!msr_info->host_initiated &&
> +                   !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
> +                       return 1;
> +
> +               /*
> +                * If the MSR is not in the atomic list yet, then the guest
> +                * never wrote a non-zero value to it yet i.e. the MSR value is
> +                * '0'.
> +                */
> +               read_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL, &spec_ctrl, NULL);
> +
> +               msr_info->data = spec_ctrl;
> +               break;
>         case MSR_IA32_SYSENTER_CS:
>                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
>                 break;
> @@ -3289,6 +3329,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>         int ret = 0;
>         u32 msr_index = msr_info->index;
>         u64 data = msr_info->data;
> +       unsigned long *msr_bitmap;
>
>         switch (msr_index) {
>         case MSR_EFER:
> @@ -3330,6 +3371,30 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>         case MSR_IA32_TSC:
>                 kvm_write_tsc(vcpu, msr_info);
>                 break;
> +       case MSR_IA32_SPEC_CTRL:
> +               if (!msr_info->host_initiated &&
> +                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
> +                       return 1;
> +
> +               if (!msr_info->data)
> +                       break;

What if the MSR has already been set to a non-zero value? Doesn't the
early 'break' on a zero write throw away the new zero value (e.g. on
vCPU reset)?
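
One way to avoid that (a sketch only, not tested, reusing the
read_atomic_switch_msr() helper added earlier in this patch, which
returns non-zero when the MSR is not in the list): skip the autoload
list only when the MSR has never been added to it, and otherwise
propagate every write, including zero. The sketch also assumes the
vmx_disable_intercept_for_msr() call is meant to pass through
MSR_IA32_SPEC_CTRL rather than MSR_FS_BASE as in the hunk below.

    case MSR_IA32_SPEC_CTRL:
            if (!msr_info->host_initiated &&
                !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
                    return 1;

            /* Never used by the guest and still zero: nothing to do. */
            if (!msr_info->data &&
                read_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL, NULL, NULL))
                    break;

            /* Otherwise record the new value, zero or not, so a later
             * write of 0 (e.g. on vCPU reset) is not silently dropped. */
            add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL, msr_info->data, 0);

            msr_bitmap = vmx->vmcs01.msr_bitmap;
            vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SPEC_CTRL,
                                          MSR_TYPE_RW);
            break;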

> +
> +               /*
> +                * Now we know that the guest is actually using the MSR, so
> +                * atomically load and save the SPEC_CTRL MSR and pass it
> +                * through to the guest.
> +                *
> +                * NOTE:
> +                * IBRS is not supported yet as a mitigation for the host. Once
> +                * it is supported, the "host_value" will need to be '1'
> +                * instead of '0' if IBRS is used also by the host.
> +                */
> +               add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL, msr_info->data, 0);
> +
> +               msr_bitmap = vmx->vmcs01.msr_bitmap;
> +               vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
> +
> +               break;
>         case MSR_IA32_CR_PAT:
>                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
>                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 03869eb..cabaad3 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1006,6 +1006,7 @@ static u32 msrs_to_save[] = {
>  #endif
>         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
>         MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
> +       MSR_IA32_SPEC_CTRL
>  };
>
>  static unsigned num_msrs_to_save;
> --
> 2.7.4
>
