Message-ID: <20250704085027.182163-6-chao.gao@intel.com>
Date: Fri, 4 Jul 2025 01:49:36 -0700
From: Chao Gao <chao.gao@...el.com>
To: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org,
x86@...nel.org,
seanjc@...gle.com,
pbonzini@...hat.com,
dave.hansen@...el.com
Cc: rick.p.edgecombe@...el.com,
mlevitsk@...hat.com,
john.allen@....com,
weijiang.yang@...el.com,
minipli@...ecurity.net,
xin@...or.com,
Chao Gao <chao.gao@...el.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH v11 05/23] KVM: x86: Introduce KVM_{G,S}ET_ONE_REG uAPIs support
From: Yang Weijiang <weijiang.yang@...el.com>
Enable the KVM_{G,S}ET_ONE_REG uAPIs so that userspace can access hardware
MSRs or KVM synthetic MSRs through them.
In the CET KVM series [1], KVM "steals" an MSR from the PV MSR space and
accesses it via the KVM_{G,S}ET_MSRS uAPIs, but that approach pollutes the
PV MSR space and hides the difference between synthetic MSRs and normal
HW-defined MSRs.
Now carve out a separate space in the KVM-customized MSR address space for
synthetic MSRs. The synthetic MSRs are not exposed to userspace via
KVM_GET_MSR_INDEX_LIST; instead, userspace complies with KVM's definitions
and composes the uAPI parameters itself. KVM synthetic MSR indices start
from 0 and increase linearly. Userspace callers must tag the MSR type
correctly in order to access the intended hardware or synthetic MSR, for
example as sketched below.
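As a rough illustration (not part of this patch), a userspace VMM could
compose the 64-bit register ID from the kvm_x86_reg_id layout and read a
hardware MSR through KVM_GET_ONE_REG roughly as follows. "vcpu_fd" and
"msr_index" are placeholders for this sketch, and struct kvm_x86_reg_id
only exists once this patch is applied:

  /*
   * Hedged sketch: read a hardware MSR via KVM_GET_ONE_REG.
   * vcpu_fd is an open vCPU file descriptor; msr_index is any
   * architectural MSR index (both assumptions for illustration).
   */
  #include <stdint.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int read_msr_one_reg(int vcpu_fd, uint32_t msr_index, uint64_t *val)
  {
          struct kvm_x86_reg_id id = {
                  .index = msr_index,
                  .type  = KVM_X86_REG_MSR,       /* hardware MSR, not synthetic */
          };
          struct kvm_one_reg reg = {
                  .addr = (uint64_t)(uintptr_t)val,  /* KVM writes the value here */
          };

          /* The register ID is the kvm_x86_reg_id layout, bit-for-bit. */
          memcpy(&reg.id, &id, sizeof(reg.id));

          return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
  }

A synthetic MSR would be tagged with KVM_X86_REG_SYNTHETIC instead, with
the index taken from KVM's synthetic MSR numbering rather than the
architectural MSR address.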
Suggested-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Yang Weijiang <weijiang.yang@...el.com>
Signed-off-by: Chao Gao <chao.gao@...el.com>
Link: https://lore.kernel.org/all/20240219074733.122080-18-weijiang.yang@intel.com/ [1]
---
arch/x86/include/uapi/asm/kvm.h | 10 +++++
arch/x86/kvm/x86.c | 66 +++++++++++++++++++++++++++++++++
2 files changed, 76 insertions(+)
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 0f15d683817d..e72d9e6c1739 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -411,6 +411,16 @@ struct kvm_xcrs {
__u64 padding[16];
};
+#define KVM_X86_REG_MSR (1 << 2)
+#define KVM_X86_REG_SYNTHETIC (1 << 3)
+
+struct kvm_x86_reg_id {
+ __u32 index;
+ __u8 type;
+ __u8 rsvd;
+ __u16 rsvd16;
+};
+
#define KVM_SYNC_X86_REGS (1UL << 0)
#define KVM_SYNC_X86_SREGS (1UL << 1)
#define KVM_SYNC_X86_EVENTS (1UL << 2)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9d1c84f0794..e5c2bf4a90e6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2237,6 +2237,31 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
return kvm_set_msr_ignored_check(vcpu, index, *data, true);
}
+static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *value)
+{
+ u64 val;
+ int r;
+
+ r = do_get_msr(vcpu, msr, &val);
+ if (r)
+ return r;
+
+ if (put_user(val, value))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_set_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *value)
+{
+ u64 val;
+
+ if (get_user(val, value))
+ return -EFAULT;
+
+ return do_set_msr(vcpu, msr, &val);
+}
+
#ifdef CONFIG_X86_64
struct pvclock_clock {
int vclock_mode;
@@ -5896,6 +5921,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
}
}
+static int kvm_translate_synthetic_msr(struct kvm_x86_reg_id *reg)
+{
+ return -EINVAL;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -6012,6 +6042,42 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
+ case KVM_GET_ONE_REG:
+ case KVM_SET_ONE_REG: {
+ struct kvm_x86_reg_id *id;
+ struct kvm_one_reg reg;
+ u64 __user *value;
+
+ r = -EFAULT;
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ break;
+
+ r = -EINVAL;
+ id = (struct kvm_x86_reg_id *)&reg.id;
+ if (id->rsvd || id->rsvd16)
+ break;
+
+ if (id->type != KVM_X86_REG_MSR &&
+ id->type != KVM_X86_REG_SYNTHETIC)
+ break;
+
+ if (id->type == KVM_X86_REG_SYNTHETIC) {
+ r = kvm_translate_synthetic_msr(id);
+ if (r)
+ break;
+ }
+
+ r = -EINVAL;
+ if (id->type != KVM_X86_REG_MSR)
+ break;
+
+ value = u64_to_user_ptr(reg.addr);
+ if (ioctl == KVM_GET_ONE_REG)
+ r = kvm_get_one_msr(vcpu, id->index, value);
+ else
+ r = kvm_set_one_msr(vcpu, id->index, value);
+ break;
+ }
case KVM_TPR_ACCESS_REPORTING: {
struct kvm_tpr_access_ctl tac;
--
2.47.1