Message-Id: <20230317213333.2174969-28-tj@kernel.org>
Date: Fri, 17 Mar 2023 11:33:28 -1000
From: Tejun Heo <tj@...nel.org>
To: torvalds@...ux-foundation.org, mingo@...hat.com,
peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com, ast@...nel.org,
daniel@...earbox.net, andrii@...nel.org, martin.lau@...nel.org,
joshdon@...gle.com, brho@...gle.com, pjt@...gle.com,
derkling@...gle.com, haoluo@...gle.com, dvernet@...a.com,
dschatzberg@...a.com, dskarlat@...cmu.edu, riel@...riel.com
Cc: linux-kernel@...r.kernel.org, bpf@...r.kernel.org,
kernel-team@...a.com, Tejun Heo <tj@...nel.org>
Subject: [PATCH 27/32] sched_ext: Implement sched_ext_ops.cpu_online/offline()

Add ops.cpu_online/offline() which are invoked when CPUs come online and go
offline, respectively. As the enqueue path already bypasses ops.enqueue() and
sends tasks directly to the local DSQ on a deactivated CPU, BPF schedulers are
guaranteed to see tasks only on CPUs that are between their cpu_online() and
cpu_offline() invocations.
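For illustration, a BPF scheduler could hook these callbacks roughly as
follows. This is only a sketch and not part of the patch: the header name,
the BPF_STRUCT_OPS() convenience macro and the "example" scheduler name are
assumed from the example schedulers shipped with this series.

  /* Hypothetical sketch; header/macro names borrowed from the example schedulers. */
  #include "scx_common.bpf.h"

  char _license[] SEC("license") = "GPL";

  void BPF_STRUCT_OPS(example_cpu_online, s32 cpu)
  {
          /* @cpu may be handed tasks from here on; just log it here. */
          bpf_printk("cpu%d came online", cpu);
  }

  void BPF_STRUCT_OPS(example_cpu_offline, s32 cpu)
  {
          /* No further ops.enqueue() or foreign tasks will be seen on @cpu. */
          bpf_printk("cpu%d is going offline", cpu);
  }

  SEC(".struct_ops")
  struct sched_ext_ops example_ops = {
          .cpu_online     = (void *)example_cpu_online,
          .cpu_offline    = (void *)example_cpu_offline,
          .name           = "example",
  };
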
Signed-off-by: Tejun Heo <tj@...nel.org>
Reviewed-by: David Vernet <dvernet@...a.com>
Acked-by: Josh Don <joshdon@...gle.com>
Acked-by: Hao Luo <haoluo@...gle.com>
Acked-by: Barret Rhoden <brho@...gle.com>
---
include/linux/sched/ext.h | 18 ++++++++++++++++++
kernel/sched/ext.c | 18 +++++++++++++++++-
2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 826da32e29ba..63a011860f59 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -378,6 +378,24 @@ struct sched_ext_ops {
*/
void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
+ /**
+ * cpu_online - A CPU became online
+ * @cpu: CPU which just came up
+ *
+ * @cpu just came online. @cpu doesn't call ops.enqueue() or run tasks
+ * associated with other CPUs beforehand.
+ */
+ void (*cpu_online)(s32 cpu);
+
+ /**
+ * cpu_offline - A CPU is going offline
+ * @cpu: CPU which is going offline
+ *
+ * @cpu is going offline. @cpu doesn't call ops.enqueue() or run tasks
+ * associated with other CPUs afterwards.
+ */
+ void (*cpu_offline)(s32 cpu);
+
/**
* prep_enable - Prepare to enable BPF scheduling for a task
* @p: task to prepare BPF scheduling for
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 4f342b7a6f45..dbeec22bee73 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1393,7 +1393,8 @@ static int balance_scx(struct rq *rq, struct task_struct *prev,
* emitted in scx_notify_pick_next_task().
*/
if (SCX_HAS_OP(cpu_acquire))
- SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_acquire, cpu_of(rq),
+ NULL);
rq->scx.cpu_released = false;
}
@@ -1824,6 +1825,18 @@ void __scx_update_idle(struct rq *rq, bool idle)
}
}
+static void rq_online_scx(struct rq *rq, enum rq_onoff_reason reason)
+{
+ if (SCX_HAS_OP(cpu_online) && reason == RQ_ONOFF_HOTPLUG)
+ SCX_CALL_OP(SCX_KF_REST, cpu_online, cpu_of(rq));
+}
+
+static void rq_offline_scx(struct rq *rq, enum rq_onoff_reason reason)
+{
+ if (SCX_HAS_OP(cpu_offline) && reason == RQ_ONOFF_HOTPLUG)
+ SCX_CALL_OP(SCX_KF_REST, cpu_offline, cpu_of(rq));
+}
+
#else /* !CONFIG_SMP */
static bool test_and_clear_cpu_idle(int cpu) { return false; }
@@ -2329,6 +2342,9 @@ DEFINE_SCHED_CLASS(ext) = {
.balance = balance_scx,
.select_task_rq = select_task_rq_scx,
.set_cpus_allowed = set_cpus_allowed_scx,
+
+ .rq_online = rq_online_scx,
+ .rq_offline = rq_offline_scx,
#endif
.task_tick = task_tick_scx,
--
2.39.2