Message-Id: <1457729240-3846-2-git-send-email-dmatlack@google.com>
Date:	Fri, 11 Mar 2016 12:47:20 -0800
From:	David Matlack <dmatlack@...gle.com>
To:	linux-kernel@...r.kernel.org, x86@...nel.org, kvm@...r.kernel.org
Cc:	pbonzini@...hat.com, mingo@...hat.com, luto@...nel.org,
	hpa@...or.com, digitaleric@...gle.com
Subject: [PATCH 1/1] KVM: don't allow irq_fpu_usable when the VCPU's XCR0 is loaded

From: Eric Northup <digitaleric@...gle.com>

Add a percpu boolean tracking whether the current CPU has a KVM
vCPU's XCR0 loaded.  KVM sets and clears it as it loads and unloads
the guest XCR0.  While a guest XCR0 is live, saving or restoring FPU
state from interrupt context would run under the guest's XCR0 rather
than the host's and could corrupt extended state, so irq_fpu_usable()
now also checks this percpu boolean and reports the FPU as unusable
in that window.  (The rest of the guest FPU load/restore is already
safe, because kvm_load_guest_fpu and kvm_put_guest_fpu call
__kernel_fpu_begin() and __kernel_fpu_end(), respectively.)
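
For context, a minimal sketch of the interrupt-side pattern this
check guards (not part of this patch; the function name is
hypothetical).  Callers are expected to fall back to a non-FPU path
whenever irq_fpu_usable() returns false:

	/* Hypothetical interrupt-context FPU user. */
	static void example_irq_fpu_work(void)
	{
		if (!irq_fpu_usable()) {
			/* A guest XCR0 may be loaded; take a scalar fallback. */
			return;
		}

		kernel_fpu_begin();	/* saves the current FPU state */
		/* ... SIMD work, e.g. RAID checksumming or crypto ... */
		kernel_fpu_end();	/* restores the FPU state */
	}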
---
 arch/x86/include/asm/i387.h |  3 +++
 arch/x86/kernel/i387.c      | 10 ++++++++--
 arch/x86/kvm/x86.c          |  4 ++++
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index ed8089d..ca2c173 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -14,6 +14,7 @@
 
 #include <linux/sched.h>
 #include <linux/hardirq.h>
+#include <linux/percpu.h>
 
 struct pt_regs;
 struct user_i387_struct;
@@ -25,6 +26,8 @@ extern void math_state_restore(void);
 
 extern bool irq_fpu_usable(void);
 
+DECLARE_PER_CPU(bool, kvm_xcr0_loaded);
+
 /*
  * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
  * and they don't touch the preempt state on their own.
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b627746..9015828 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -19,6 +19,9 @@
 #include <asm/fpu-internal.h>
 #include <asm/user.h>
 
+DEFINE_PER_CPU(bool, kvm_xcr0_loaded);
+EXPORT_PER_CPU_SYMBOL(kvm_xcr0_loaded);
+
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
@@ -33,8 +36,11 @@
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
-	if (use_eager_fpu())
-		return __thread_has_fpu(current);
+	if (use_eager_fpu()) {
+		/* Preempt already disabled, safe to read percpu. */
+		return __thread_has_fpu(current) &&
+			!__this_cpu_read(kvm_xcr0_loaded);
+	}
 
 	return !__thread_has_fpu(current) &&
 		(read_cr0() & X86_CR0_TS);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d21bce5..f0ba7a1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -557,8 +557,10 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
+	BUG_ON(this_cpu_read(kvm_xcr0_loaded) != vcpu->guest_xcr0_loaded);
 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
 			!vcpu->guest_xcr0_loaded) {
+		this_cpu_write(kvm_xcr0_loaded, 1);
 		/* kvm_set_xcr() also depends on this */
 		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
 		vcpu->guest_xcr0_loaded = 1;
@@ -571,7 +573,9 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.xcr0 != host_xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
 		vcpu->guest_xcr0_loaded = 0;
+		this_cpu_write(kvm_xcr0_loaded, 0);
 	}
+	BUG_ON(this_cpu_read(kvm_xcr0_loaded) != vcpu->guest_xcr0_loaded);
 }
 
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
-- 
2.7.0.rc3.207.g0ac5344
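
For reviewers, a condensed sketch of the window the new percpu flag
closes (not a literal call trace; assumes an eager-FPU host and a
guest with CR4.OSXSAVE set):

	vcpu_enter_guest()
	  kvm_load_guest_xcr0()    /* xsetbv(guest xcr0); kvm_xcr0_loaded = 1 */
	  ...
	    <interrupt from kernel mode>
	      irq_fpu_usable()     /* now false: kvm_xcr0_loaded is set */
	      /*
	       * Before this patch it could return true, and the XSAVE/XRSTOR
	       * in kernel_fpu_begin()/kernel_fpu_end() would then run with
	       * the guest's XCR0 in force, corrupting extended state.
	       */
	  kvm_put_guest_xcr0()     /* xsetbv(host xcr0); kvm_xcr0_loaded = 0 */

The flag mirrors vcpu->guest_xcr0_loaded (the two BUG_ONs assert
this), and reading it with __this_cpu_read() in
interrupted_kernel_fpu_idle() is safe because that path only runs in
interrupt context, where preemption is disabled.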
