Date:   Tue, 29 Nov 2022 22:23:07 -1000
From:   Tejun Heo <tj@...nel.org>
To:     torvalds@...ux-foundation.org, mingo@...hat.com,
        peterz@...radead.org, juri.lelli@...hat.com,
        vincent.guittot@...aro.org, dietmar.eggemann@....com,
        rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
        bristot@...hat.com, vschneid@...hat.com, ast@...nel.org,
        daniel@...earbox.net, andrii@...nel.org, martin.lau@...nel.org,
        joshdon@...gle.com, brho@...gle.com, pjt@...gle.com,
        derkling@...gle.com, haoluo@...gle.com, dvernet@...a.com,
        dschatzberg@...a.com, dskarlat@...cmu.edu, riel@...riel.com
Cc:     linux-kernel@...r.kernel.org, bpf@...r.kernel.org,
        kernel-team@...a.com, Tejun Heo <tj@...nel.org>
Subject: [PATCH 25/31] sched_ext: Implement SCX_KICK_WAIT

From: David Vernet <dvernet@...a.com>

If set when calling scx_bpf_kick_cpu(), the invoking CPU will busy-wait for
the kicked CPU to enter the scheduler. This will be used to improve the
exclusion guarantees in scx_example_pair.
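
As a quick illustration (not part of this patch), a BPF scheduler
implementing pair-wise exclusion could combine the new flag with
SCX_KICK_PREEMPT so that it only proceeds once the other CPU is known to
have re-entered the scheduler; 'sibling' here is a hypothetical variable:

	/* Preempt the sibling CPU and busy-wait until it has gone back
	 * through pick_next_task(). */
	scx_bpf_kick_cpu(sibling, SCX_KICK_PREEMPT | SCX_KICK_WAIT);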

Signed-off-by: David Vernet <dvernet@...a.com>
Reviewed-by: Tejun Heo <tj@...nel.org>
Signed-off-by: Tejun Heo <tj@...nel.org>
Acked-by: Josh Don <joshdon@...gle.com>
Acked-by: Hao Luo <haoluo@...gle.com>
Acked-by: Barret Rhoden <brho@...gle.com>
---
 kernel/sched/core.c  |  4 +++-
 kernel/sched/ext.c   | 36 ++++++++++++++++++++++++++++++++++--
 kernel/sched/ext.h   | 20 ++++++++++++++++++++
 kernel/sched/sched.h |  2 ++
 4 files changed, 59 insertions(+), 3 deletions(-)
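
For readers unfamiliar with the pattern, the following stand-alone
user-space sketch (C11 atomics, not kernel code; all names are made up)
mirrors the protocol the patch implements: the kicker snapshots a per-CPU
sequence counter, sends the kick, and spins with an acquire load until the
target bumps the counter with a release store from its pick_next_task()
path.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong pnt_seq = 0;	/* models rq->scx.pnt_seq */
static atomic_bool need_resched = false;/* models the resched kick */
static atomic_bool stop_flag = false;	/* demo shutdown only */

/* Models the kicked CPU: on each "scheduler entry" triggered by a kick,
 * bump the sequence counter with release semantics, as
 * scx_notify_pick_next_task() does with smp_store_release(). */
static void *target_cpu(void *arg)
{
	while (!atomic_load_explicit(&stop_flag, memory_order_relaxed)) {
		if (atomic_load_explicit(&need_resched, memory_order_relaxed)) {
			atomic_store_explicit(&need_resched, false,
					      memory_order_relaxed);
			atomic_fetch_add_explicit(&pnt_seq, 1,
						  memory_order_release);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, target_cpu, NULL);

	/* Models kick_cpus_irq_workfn(): snapshot the counter, kick, then
	 * busy-wait until it moves, like the acquire loop in the patch. */
	unsigned long seq = atomic_load_explicit(&pnt_seq,
						 memory_order_relaxed);
	atomic_store_explicit(&need_resched, true, memory_order_relaxed);
	while (atomic_load_explicit(&pnt_seq, memory_order_acquire) == seq)
		;	/* cpu_relax() in the kernel */

	printf("target CPU went through its pick-next path\n");
	atomic_store_explicit(&stop_flag, true, memory_order_relaxed);
	pthread_join(t, NULL);
	return 0;
}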

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 79560641a61f..ea4f6edfcf32 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5886,8 +5886,10 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	for_each_active_class(class) {
 		p = class->pick_next_task(rq);
-		if (p)
+		if (p) {
+			scx_notify_pick_next_task(rq, p, class);
 			return p;
+		}
 	}
 
 	BUG(); /* The idle class should always have a runnable task. */
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index bd03b55fbcf5..aeaad3d8b05a 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -109,8 +109,9 @@ unsigned long last_timeout_check = INITIAL_JIFFIES;
 
 static struct delayed_work check_timeout_work;
 
-/* idle tracking */
 #ifdef CONFIG_SMP
+
+/* idle tracking */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 #define CL_ALIGNED_IF_ONSTACK
 #else
@@ -123,7 +124,11 @@ static struct {
 } idle_masks CL_ALIGNED_IF_ONSTACK;
 
 static bool __cacheline_aligned_in_smp has_idle_cpus;
-#endif
+
+/* for %SCX_KICK_WAIT */
+static u64 __percpu *kick_cpus_pnt_seqs;
+
+#endif	/* CONFIG_SMP */
 
 /*
  * Direct dispatch marker.
@@ -2959,6 +2964,7 @@ static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
 {
 	struct rq *this_rq = this_rq();
+	u64 *pseqs = this_cpu_ptr(kick_cpus_pnt_seqs);
 	int this_cpu = cpu_of(this_rq);
 	int cpu;
 
@@ -2972,14 +2978,32 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)
 			if (cpumask_test_cpu(cpu, this_rq->scx.cpus_to_preempt) &&
 			    rq->curr->sched_class == &ext_sched_class)
 				rq->curr->scx.slice = 0;
+			pseqs[cpu] = rq->scx.pnt_seq;
 			resched_curr(rq);
+		} else {
+			cpumask_clear_cpu(cpu, this_rq->scx.cpus_to_wait);
 		}
 
 		raw_spin_rq_unlock_irqrestore(rq, flags);
 	}
 
+	for_each_cpu_andnot(cpu, this_rq->scx.cpus_to_wait,
+			    cpumask_of(this_cpu)) {
+		/*
+		 * Pairs with smp_store_release() issued by this CPU in
+		 * scx_notify_pick_next_task() on the resched path.
+		 *
+		 * We busy-wait here to guarantee that no other task can be
+		 * scheduled on our core before the target CPU has entered the
+		 * resched path.
+		 */
+		while (smp_load_acquire(&cpu_rq(cpu)->scx.pnt_seq) == pseqs[cpu])
+			cpu_relax();
+	}
+
 	cpumask_clear(this_rq->scx.cpus_to_kick);
 	cpumask_clear(this_rq->scx.cpus_to_preempt);
+	cpumask_clear(this_rq->scx.cpus_to_wait);
 }
 #endif
 
@@ -2999,6 +3023,11 @@ void __init init_sched_ext_class(void)
 #ifdef CONFIG_SMP
 	BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
 	BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
+
+	kick_cpus_pnt_seqs = __alloc_percpu(sizeof(kick_cpus_pnt_seqs[0]) *
+					    num_possible_cpus(),
+					    __alignof__(kick_cpus_pnt_seqs[0]));
+	BUG_ON(!kick_cpus_pnt_seqs);
 #endif
 	for_each_possible_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
@@ -3009,6 +3038,7 @@ void __init init_sched_ext_class(void)
 #ifdef CONFIG_SMP
 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
+		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
 		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
 #endif
 	}
@@ -3228,6 +3258,8 @@ void scx_bpf_kick_cpu(s32 cpu, u64 flags)
 		cpumask_set_cpu(cpu, rq->scx.cpus_to_kick);
 		if (flags & SCX_KICK_PREEMPT)
 			cpumask_set_cpu(cpu, rq->scx.cpus_to_preempt);
+		if (flags & SCX_KICK_WAIT)
+			cpumask_set_cpu(cpu, rq->scx.cpus_to_wait);
 
 		irq_work_queue(&rq->scx.kick_cpus_irq_work);
 		preempt_enable();
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index 470b2224cdfa..8ae717c5e850 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -66,6 +66,7 @@ enum scx_tg_flags {
 
 enum scx_kick_flags {
 	SCX_KICK_PREEMPT	= 1LLU << 0,	/* force scheduling on the CPU */
+	SCX_KICK_WAIT		= 1LLU << 1,	/* wait for the CPU to be rescheduled */
 };
 
 #ifdef CONFIG_SCHED_CLASS_EXT
@@ -107,6 +108,22 @@ __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type,
 #define scx_ops_error(fmt, args...)						\
 	scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args)
 
+static inline void scx_notify_pick_next_task(struct rq *rq,
+					     const struct task_struct *p,
+					     const struct sched_class *active)
+{
+#ifdef CONFIG_SMP
+	if (!scx_enabled())
+		return;
+	/*
+	 * Pairs with the smp_load_acquire() issued by a CPU in
+	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
+	 * resched.
+	 */
+	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
+#endif
+}
+
 static inline void scx_notify_sched_tick(void)
 {
 	unsigned long last_check, timeout;
@@ -164,6 +181,9 @@ static inline int scx_check_setscheduler(struct task_struct *p,
 					 int policy) { return 0; }
 static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
 static inline void init_sched_ext_class(void) {}
+static inline void scx_notify_pick_next_task(struct rq *rq,
+					     const struct task_struct *p,
+					     const struct sched_class *active) {}
 static inline void scx_notify_sched_tick(void) {}
 
 #define for_each_active_class		for_each_class
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a2ffa94ede02..5af758cc1e38 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -714,6 +714,8 @@ struct scx_rq {
 #ifdef CONFIG_SMP
 	cpumask_var_t		cpus_to_kick;
 	cpumask_var_t		cpus_to_preempt;
+	cpumask_var_t		cpus_to_wait;
+	u64			pnt_seq;
 	struct irq_work		kick_cpus_irq_work;
 #endif
 };
-- 
2.38.1
