Date:	Fri, 11 Mar 2016 12:59:33 +0100
From:	Juergen Gross <jgross@...e.com>
To:	linux-kernel@...r.kernel.org, xen-devel@...ts.xenproject.org
Cc:	konrad.wilk@...cle.com, boris.ostrovsky@...cle.com,
	david.vrabel@...rix.com, mingo@...hat.com, peterz@...radead.org,
	Douglas_Warzecha@...l.com, pali.rohar@...il.com, jdelvare@...e.com,
	linux@...ck-us.net, tglx@...utronix.de, hpa@...or.com,
	x86@...nel.org, Juergen Gross <jgross@...e.com>
Subject: [PATCH 5/6] virt, sched: add cpu pinning to call_sync_on_phys_cpu()

Add generic virtualization support for pinning the current vcpu to a
specified physical cpu. As this operation isn't performance critical
(only a very limited set of operations like BIOS calls and SMIs is
expected to need it), just add a hypervisor-specific indirection.
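
For illustration, a hypervisor backend would implement the hook and
register it in its struct hypervisor_x86. A minimal sketch, where
example_pin_vcpu() and pin_vcpu_hypercall() are made-up names standing
in for whatever pinning interface the hypervisor actually offers:

/* Sketch only: pin_vcpu_hypercall() is a hypothetical wrapper around
 * the hypervisor's real pinning interface, not part of this patch. */
static void example_pin_vcpu(int cpu)
{
	/* cpu == -1 removes the pinning again */
	pin_vcpu_hypercall(cpu);
}

static const struct hypervisor_x86 x86_hyper_example = {
	.name		= "example hypervisor",
	.pin_vcpu	= example_pin_vcpu,
};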

Such pinning should last as short a time as possible, as it might
block sensible vcpu scheduling and other hypervisor functions which
rely on scheduling, like suspending the system. To ensure this, don't
let the current thread be preempted while the vcpu is pinned in
call_sync_on_phys_cpu().
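
A caller would then look roughly like the sketch below: the pinning
is in effect only for the duration of the called function.
do_smi_call(), issue_smi() and struct smi_request are hypothetical,
for illustration only:

/* Hypothetical user: run a firmware/SMI request on physical cpu 0. */
static int do_smi_call(void *arg)
{
	struct smi_request *req = arg;

	return issue_smi(req);	/* made-up low-level helper */
}

static int driver_smi(struct smi_request *req)
{
	/* the vcpu is pinned to physical cpu 0, with preemption
	 * disabled, only while do_smi_call() runs */
	return call_sync_on_phys_cpu(0, do_smi_call, req);
}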

Signed-off-by: Juergen Gross <jgross@...e.com>
---
 arch/x86/include/asm/hypervisor.h |  9 +++++++++
 include/linux/sched.h             | 24 +++++++++++++++++++++++-
 kernel/sched/core.c               |  4 ++++
 3 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 055ea99..13f80a2 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -43,6 +43,9 @@ struct hypervisor_x86 {
 
 	/* X2APIC detection (run once per boot) */
 	bool		(*x2apic_available)(void);
+
+	/* pin current vcpu to specified physical cpu (run rarely) */
+	void		(*pin_vcpu)(int);
 };
 
 extern const struct hypervisor_x86 *x86_hyper;
@@ -56,6 +59,12 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
 extern void init_hypervisor(struct cpuinfo_x86 *c);
 extern void init_hypervisor_platform(void);
 extern bool hypervisor_x2apic_available(void);
+
+static inline void hypervisor_pin_vcpu(int cpu)
+{
+	if (x86_hyper && x86_hyper->pin_vcpu)
+		x86_hyper->pin_vcpu(cpu);
+}
 #else
 static inline void init_hypervisor(struct cpuinfo_x86 *c) { }
 static inline void init_hypervisor_platform(void) { }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dfadf1a..53b33d5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -61,6 +61,9 @@ struct sched_param {
 #include <linux/cgroup-defs.h>
 
 #include <asm/processor.h>
+#ifdef CONFIG_HYPERVISOR_GUEST
+#include <asm/hypervisor.h>
+#endif
 
 #define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
 
@@ -2230,6 +2233,17 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 }
 
+#ifdef CONFIG_HYPERVISOR_GUEST
+static inline void pin_vcpu(int cpu)
+{
+	hypervisor_pin_vcpu(cpu);
+}
+#else
+static inline void pin_vcpu(int cpu)
+{
+}
+#endif
+
 static inline void tsk_restore_flags(struct task_struct *task,
 				unsigned long orig_flags, unsigned long flags)
 {
@@ -2263,10 +2277,18 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 static inline int call_sync_on_phys_cpu(unsigned cpu, int (*func)(void *),
 					void *par)
 {
+	int ret;
+
 	if (cpu != 0)
 		return -EINVAL;
 
-	return func(par);
+	preempt_disable();
+	pin_vcpu(0);
+	ret = func(par);
+	pin_vcpu(-1);
+	preempt_enable();
+
+	return ret;
 }
 #endif
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cb9955f..2dc27ca 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1281,7 +1281,11 @@ int call_sync_on_phys_cpu(unsigned cpu, int (*func)(void *), void *par)
 	if (ret)
 		goto out;
 
+	preempt_disable();
+	pin_vcpu(cpu);
 	ret = func(par);
+	pin_vcpu(-1);
+	preempt_enable();
 
 	set_cpus_allowed_ptr(current, old_mask);
 
-- 
2.6.2
