lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240226143630.33643-47-jiangshanlai@gmail.com>
Date: Mon, 26 Feb 2024 22:36:03 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Hou Wenlong <houwenlong.hwl@...group.com>,
	Lai Jiangshan <jiangshan.ljs@...group.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Sean Christopherson <seanjc@...gle.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Borislav Petkov <bp@...en8.de>,
	Ingo Molnar <mingo@...hat.com>,
	kvm@...r.kernel.org,
	Paolo Bonzini <pbonzini@...hat.com>,
	x86@...nel.org,
	Kees Cook <keescook@...omium.org>,
	Juergen Gross <jgross@...e.com>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	"H. Peter Anvin" <hpa@...or.com>
Subject: [RFC PATCH 46/73] KVM: x86/PVM: Support for CPUID faulting

From: Hou Wenlong <houwenlong.hwl@...group.com>

For PVM, CPUID faulting relies on hardware, so if CPUID faulting is not
enabled, the guest can read the host's CPUID information. To let the
guest see only its own CPUID information, introduce a module parameter
to force-enable CPUID faulting for the guest.

Suggested-by: Lai Jiangshan <jiangshan.ljs@...group.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@...group.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
---
 arch/x86/kvm/pvm/pvm.c | 69 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)

diff --git a/arch/x86/kvm/pvm/pvm.c b/arch/x86/kvm/pvm/pvm.c
index e6464095d40b..fd3d6f7301af 100644
--- a/arch/x86/kvm/pvm/pvm.c
+++ b/arch/x86/kvm/pvm/pvm.c
@@ -29,6 +29,9 @@
 MODULE_AUTHOR("AntGroup");
 MODULE_LICENSE("GPL");
 
+static bool __read_mostly enable_cpuid_intercept = 0;
+module_param_named(cpuid_intercept, enable_cpuid_intercept, bool, 0444);
+
 static bool __read_mostly is_intel;
 
 static unsigned long host_idt_base;
@@ -168,6 +171,53 @@ static bool pvm_disallowed_va(struct kvm_vcpu *vcpu, u64 va)
 	return !pvm_guest_allowed_va(vcpu, va);
 }
 
+static void __set_cpuid_faulting(bool on)
+{
+	u64 msrval;
+
+	rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msrval);
+	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
+	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
+	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
+}
+
+static void reset_cpuid_intercept(struct kvm_vcpu *vcpu)
+{
+	if (test_thread_flag(TIF_NOCPUID))
+		return;
+
+	if (enable_cpuid_intercept || cpuid_fault_enabled(vcpu))
+		__set_cpuid_faulting(false);
+}
+
+static void set_cpuid_intercept(struct kvm_vcpu *vcpu)
+{
+	if (test_thread_flag(TIF_NOCPUID))
+		return;
+
+	if (enable_cpuid_intercept || cpuid_fault_enabled(vcpu))
+		__set_cpuid_faulting(true);
+}
+
+static void pvm_update_guest_cpuid_faulting(struct kvm_vcpu *vcpu, u64 data)
+{
+	bool guest_enabled = cpuid_fault_enabled(vcpu);
+	bool set_enabled = data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
+	struct vcpu_pvm *pvm = to_pvm(vcpu);
+
+	if (!(guest_enabled ^ set_enabled))
+		return;
+	if (enable_cpuid_intercept)
+		return;
+	if (test_thread_flag(TIF_NOCPUID))
+		return;
+
+	preempt_disable();
+	if (pvm->loaded_cpu_state)
+		__set_cpuid_faulting(set_enabled);
+	preempt_enable();
+}
+
 // switch_to_smod() and switch_to_umod() switch the mode (smod/umod) and
 // the CR3.  No vTLB flushing when switching the CR3 per PVM Spec.
 static inline void switch_to_smod(struct kvm_vcpu *vcpu)
@@ -335,6 +385,8 @@ static void pvm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 
 	segments_save_host_and_switch_to_guest(pvm);
 
+	set_cpuid_intercept(vcpu);
+
 	kvm_set_user_return_msr(0, (u64)entry_SYSCALL_64_switcher, -1ull);
 	kvm_set_user_return_msr(1, pvm->msr_tsc_aux, -1ull);
 	if (ia32_enabled()) {
@@ -352,6 +404,8 @@ static void pvm_prepare_switch_to_host(struct vcpu_pvm *pvm)
 
 	++pvm->vcpu.stat.host_state_reload;
 
+	reset_cpuid_intercept(&pvm->vcpu);
+
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	if (unlikely(current->mm->context.ldt))
 		kvm_load_ldt(GDT_ENTRY_LDT*8);
@@ -937,6 +991,17 @@ static int pvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_DEBUGCTLMSR:
 		/* It is ignored now. */
 		break;
+	case MSR_MISC_FEATURES_ENABLES:
+		ret = kvm_set_msr_common(vcpu, msr_info);
+		if (!ret)
+			pvm_update_guest_cpuid_faulting(vcpu, data);
+		break;
+	case MSR_PLATFORM_INFO:
+		if ((data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
+		     !boot_cpu_has(X86_FEATURE_CPUID_FAULT))
+			return 1;
+		ret = kvm_set_msr_common(vcpu, msr_info);
+		break;
 	case MSR_PVM_VCPU_STRUCT:
 		if (!PAGE_ALIGNED(data))
 			return 1;
@@ -2925,6 +2990,10 @@ static int __init hardware_cap_check(void)
 		pr_warn("CMPXCHG16B is required for guest.\n");
 		return -EOPNOTSUPP;
 	}
+	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT) && enable_cpuid_intercept) {
+		pr_warn("Host doesn't support cpuid faulting.\n");
+		return -EOPNOTSUPP;
+	}
 
 	return 0;
 }
-- 
2.19.1.6.gb485710b


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ