Message-ID: <ZjLE7giCsEI4Sftp@google.com>
Date: Wed, 1 May 2024 15:40:46 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Yang Weijiang <weijiang.yang@...el.com>
Cc: pbonzini@...hat.com, dave.hansen@...el.com, x86@...nel.org, 
	kvm@...r.kernel.org, linux-kernel@...r.kernel.org, peterz@...radead.org, 
	chao.gao@...el.com, rick.p.edgecombe@...el.com, mlevitsk@...hat.com, 
	john.allen@....com
Subject: Re: [PATCH v10 17/27] KVM: x86: Report KVM supported CET MSRs as to-be-saved

On Sun, Feb 18, 2024, Yang Weijiang wrote:
> Add CET MSRs to the list of MSRs reported to userspace if the feature,
> i.e. IBT or SHSTK, associated with the MSRs is supported by KVM.
> 
> SSP can only be read via RDSSP. Writing even requires destructive and
> potentially faulting operations such as SAVEPREVSSP/RSTORSSP or
> SETSSBSY/CLRSSBSY. Let the host use a pseudo-MSR that is just a wrapper
> for the GUEST_SSP field of the VMCS.
> 
> Suggested-by: Chao Gao <chao.gao@...el.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@...el.com>
> ---
>  arch/x86/include/uapi/asm/kvm_para.h |  1 +
>  arch/x86/kvm/vmx/vmx.c               |  2 ++
>  arch/x86/kvm/x86.c                   | 18 ++++++++++++++++++
>  3 files changed, 21 insertions(+)
> 
> diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
> index 605899594ebb..9d08c0bec477 100644
> --- a/arch/x86/include/uapi/asm/kvm_para.h
> +++ b/arch/x86/include/uapi/asm/kvm_para.h
> @@ -58,6 +58,7 @@
>  #define MSR_KVM_ASYNC_PF_INT	0x4b564d06
>  #define MSR_KVM_ASYNC_PF_ACK	0x4b564d07
>  #define MSR_KVM_MIGRATION_CONTROL	0x4b564d08
> +#define MSR_KVM_SSP	0x4b564d09

We never resolved the conversation from v6[*], but I still agree with Maxim's
view that defining a synthetic MSR, which "steals" an MSR from KVM's MSR address
space, is a bad idea.

And I still also think that KVM_SET_ONE_REG is the best way forward.  Completely
untested, but I think this is all that is needed to wire up KVM_{G,S}ET_ONE_REG
to support MSRs, and carve out room for 250+ other register types, plus room for
more future stuff as needed.

We'll still need a KVM-defined MSR for SSP, but it can be KVM internal, not uAPI,
e.g. the "index" exposed to userspace can simply be '0' for a register type of
KVM_X86_REG_SYNTHETIC_MSR, and then the translated internal index can be any
value that doesn't conflict.
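
For illustration, userspace usage would look something like this (also
untested, and purely a sketch: the '0' index for SSP and the struct/field
names assume the uAPI additions in the diff below):

/*
 * Read the guest SSP via KVM_GET_ONE_REG by encoding the proposed
 * struct kvm_x86_reg_id into kvm_one_reg.id.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int read_guest_ssp(int vcpu_fd, uint64_t *ssp)
{
        struct kvm_x86_reg_id id = {
                .index = 0,                     /* SSP, per the above */
                .type  = KVM_X86_REG_SYNTHETIC_MSR,
        };
        struct kvm_one_reg reg = {
                .addr = (uintptr_t)ssp,
        };

        memcpy(&reg.id, &id, sizeof(id));
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}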

diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index ef11aa4cab42..ca2a47a85fa1 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -410,6 +410,16 @@ struct kvm_xcrs {
        __u64 padding[16];
 };
 
+#define KVM_X86_REG_MSR                        (1 << 2)
+#define KVM_X86_REG_SYNTHETIC_MSR      (1 << 3)
+
+struct kvm_x86_reg_id {
+       __u32 index;
+       __u8 type;
+       __u8 rsvd;
+       __u16 rsvd16;
+};
+
 #define KVM_SYNC_X86_REGS      (1UL << 0)
 #define KVM_SYNC_X86_SREGS     (1UL << 1)
 #define KVM_SYNC_X86_EVENTS    (1UL << 2)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 47d9f03b7778..53f2b43b4651 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2244,6 +2244,31 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        return kvm_set_msr_ignored_check(vcpu, index, *data, true);
 }
 
+static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *value)
+{
+       u64 val;
+       int r;
+
+       r = do_get_msr(vcpu, msr, &val);
+       if (r)
+               return r;
+
+       if (put_user(val, value))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int kvm_set_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *value)
+{
+       u64 val;
+
+       if (get_user(val, value))
+               return -EFAULT;
+
+       return do_set_msr(vcpu, msr, &val);
+}
+
 #ifdef CONFIG_X86_64
 struct pvclock_clock {
        int vclock_mode;
@@ -5976,6 +6001,39 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }
+       case KVM_GET_ONE_REG:
+       case KVM_SET_ONE_REG: {
+               struct kvm_x86_reg_id id;
+               struct kvm_one_reg reg;
+               u64 __user *value;
+
+               r = -EFAULT;
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       break;
+
+               r = -EINVAL;
+               id = *(struct kvm_x86_reg_id *)&reg.id;
+               if (id.rsvd || id.rsvd16)
+                       break;
+
+               if (id.type != KVM_X86_REG_MSR &&
+                   id.type != KVM_X86_REG_SYNTHETIC_MSR)
+                       break;
+
+               if (id.type == KVM_X86_REG_SYNTHETIC_MSR) {
+                       id.type = KVM_X86_REG_MSR;
+                       r = kvm_translate_synthetic_msr(&id.index);
+                       if (r)
+                               break;
+               }
+
+               value = u64_to_user_ptr(reg.addr);
+               if (ioctl == KVM_GET_ONE_REG)
+                       r = kvm_get_one_msr(vcpu, id.index, value);
+               else
+                       r = kvm_set_one_msr(vcpu, id.index, value);
+               break;
+       }
        case KVM_TPR_ACCESS_REPORTING: {
                struct kvm_tpr_access_ctl tac;
 

