Message-ID: <2d771c1f293845e09edf73f5db5b2837@honor.com>
Date: Tue, 10 Jun 2025 08:59:45 +0000
From: liuwenfang <liuwenfang@...or.com>
To: 'Tejun Heo' <tj@...nel.org>
CC: 'David Vernet' <void@...ifault.com>, 'Andrea Righi' <arighi@...dia.com>,
	'Changwoo Min' <changwoo@...lia.com>, 'Ingo Molnar' <mingo@...hat.com>,
	'Peter Zijlstra' <peterz@...radead.org>, 'Juri Lelli'
	<juri.lelli@...hat.com>, 'Vincent Guittot' <vincent.guittot@...aro.org>,
	'Dietmar Eggemann' <dietmar.eggemann@....com>, 'Steven Rostedt'
	<rostedt@...dmis.org>, 'Ben Segall' <bsegall@...gle.com>, 'Mel Gorman'
	<mgorman@...e.de>, 'Valentin Schneider' <vschneid@...hat.com>,
	"'linux-kernel@...r.kernel.org'" <linux-kernel@...r.kernel.org>
Subject: [PATCH] sched_ext: introduce cpu tick

Assume a CPU is running an RT task while an scx task sits runnable
on the CPU's local DSQ. The scx task cannot be scheduled until the
RT task sleeps; if the RT task runs for, say, 100ms, the scx task
should be migrated to another DSQ so that it gets a chance to be
scheduled on another CPU.

Add a cpu_tick operation to notify the BPF scheduler on every tick,
so it can check for long-runnable scx tasks on its local DSQ and
apply a policy (e.g. migration) to improve performance.
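
For illustration, a minimal BPF-side sketch of how a scheduler might
consume this callback. The 100ms threshold, the per-CPU timestamp map
and starved_mask are assumptions invented for this sketch, not part of
the patch, and how the rest of the scheduler reacts to the flag is left
to the individual scheduler's design:

	/*
	 * Hypothetical consumer of ops.cpu_tick(). Only
	 * bpf_ktime_get_ns(), bpf_get_smp_processor_id() and
	 * scx_bpf_dsq_nr_queued() are existing helpers/kfuncs;
	 * everything else is made up for illustration.
	 */
	#include <scx/common.bpf.h>

	#define STARVE_THRESH_NS	100000000ULL	/* 100ms, per the changelog */

	/* Bitmask of starving CPUs (assumes <= 64 CPUs for brevity);
	 * the scheduler's other ops could consult this and move the
	 * stuck tasks to a DSQ that other CPUs can service. */
	u64 starved_mask;

	struct {
		__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
		__uint(max_entries, 1);
		__type(key, u32);
		__type(value, u64);	/* last time local DSQ was empty */
	} last_empty SEC(".maps");

	void BPF_STRUCT_OPS(sketch_cpu_tick, struct rq *rq)
	{
		u32 zero = 0;
		u64 now = bpf_ktime_get_ns();
		u32 cpu = bpf_get_smp_processor_id();
		u64 *ts = bpf_map_lookup_elem(&last_empty, &zero);

		if (!ts)
			return;

		if (!scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL)) {
			/* Local DSQ drained; reset the starvation clock. */
			*ts = now;
			__sync_fetch_and_and(&starved_mask, ~(1ULL << cpu));
			return;
		}

		if (now - *ts > STARVE_THRESH_NS)
			/* Runnable scx tasks have sat on this CPU's local
			 * DSQ too long (e.g. behind a long-running RT
			 * task); flag the CPU so the scheduler can react. */
			__sync_fetch_and_or(&starved_mask, 1ULL << cpu);
	}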

Signed-off-by: liuwenfang <liuwenfang@...or.com>
---
 kernel/sched/ext.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index f5133249f..2232f616c 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -337,6 +337,16 @@ struct sched_ext_ops {
 	 */
 	void (*tick)(struct task_struct *p);
 
+	/**
+	 * @cpu_tick: Periodic tick on a CPU
+	 * @rq: the ticking CPU's runqueue
+	 *
+	 * This operation is called every 1/HZ seconds on each CPU which is
+	 * not idle. It lets the BPF scheduler apply a policy for runnable
+	 * tasks on the CPU's local DSQ.
+	 */
+	void (*cpu_tick)(struct rq *rq);
+
 	/**
 	 * @runnable: A task is becoming runnable on its associated CPU
 	 * @p: task becoming runnable
@@ -3569,6 +3579,9 @@ void scx_tick(struct rq *rq)
 	}
 
 	update_other_load_avgs(rq);
+
+	if (SCX_HAS_OP(cpu_tick))
+		SCX_CALL_OP(SCX_KF_REST, cpu_tick, rq);
 }
 
 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
@@ -5753,6 +5766,7 @@ static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
 static void sched_ext_ops__tick(struct task_struct *p) {}
+static void sched_ext_ops__cpu_tick(struct rq *rq) {}
 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
 static void sched_ext_ops__running(struct task_struct *p) {}
 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
@@ -5790,6 +5804,7 @@ static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
 	.dequeue		= sched_ext_ops__dequeue,
 	.dispatch		= sched_ext_ops__dispatch,
 	.tick			= sched_ext_ops__tick,
+	.cpu_tick		= sched_ext_ops__cpu_tick,
 	.runnable		= sched_ext_ops__runnable,
 	.running		= sched_ext_ops__running,
 	.stopping		= sched_ext_ops__stopping,
-- 
2.17.1
