[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250523181448.3777233-5-sshegde@linux.ibm.com>
Date: Fri, 23 May 2025 23:44:47 +0530
From: Shrikanth Hegde <sshegde@...ux.ibm.com>
To: mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, tglx@...utronix.de, yury.norov@...il.com,
maddy@...ux.ibm.com
Cc: sshegde@...ux.ibm.com, vschneid@...hat.com, dietmar.eggemann@....com,
rostedt@...dmis.org, jstultz@...gle.com, kprateek.nayak@....com,
huschle@...ux.ibm.com, srikar@...ux.ibm.com,
linux-kernel@...r.kernel.org, linux@...musvillemoes.dk
Subject: [RFC PATCH 4/5] sched/core: Push current task when cpu is parked
When a CPU becomes parked, all tasks present on that CPU should vacate
it. Use the existing __balance_push_cpu_stop mechanism to move out the
currently running task.
Which CPUs need to be parked is decided by the architecture.
Signed-off-by: Shrikanth Hegde <sshegde@...ux.ibm.com>
---
Note: Maybe this should be done only for CFS and EXT tasks if it is not
recommended for RT, DL, etc.
kernel/sched/core.c | 39 +++++++++++++++++++++++++++++++++++++++
kernel/sched/sched.h | 1 +
2 files changed, 40 insertions(+)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9ec12f9b3b08..dd8e824bc030 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5656,6 +5656,10 @@ void sched_tick(void)
sched_clock_tick();
+ /* push the current task out if cpu is parked */
+ if (cpu_parked(cpu))
+ push_current_task(rq);
+
rq_lock(rq, &rf);
donor = rq->donor;
@@ -8482,6 +8486,41 @@ void __init sched_init_smp(void)
}
#endif /* CONFIG_SMP */
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_PER_CPU(struct cpu_stop_work, push_task_work);
+
+/*
+ * A parked CPU is one whose underlying physical CPU is not available.
+ * Scheduling on such a CPU is going to cause OS preemption.
+ * In case any task is scheduled on such a CPU, move it out. In
+ * select_fallback_rq() a non-parked CPU will be chosen and henceforth
+ * the task shouldn't come back to this CPU.
+ */
+void push_current_task(struct rq *rq)
+{
+	struct task_struct *push_task = rq->curr;
+	unsigned long flags;
+
+	/* The idle task can't be pushed out. */
+	if (rq->curr == rq->idle || !cpu_parked(rq->cpu))
+		return;
+
+	/* Per-CPU kthreads and migration-disabled tasks must stay put. */
+	if (kthread_is_per_cpu(push_task) ||
+	    is_migration_disabled(push_task))
+		return;
+
+	local_irq_save(flags);
+	/* Reference is dropped by __balance_push_cpu_stop(). */
+	get_task_struct(push_task);
+	preempt_disable();
+
+	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
+			    this_cpu_ptr(&push_task_work));
+	preempt_enable();
+	local_irq_restore(flags);
+}
+#else
+void push_current_task(struct rq *rq) { }
+#endif
+
int in_sched_functions(unsigned long addr)
{
return in_lock_functions(addr) ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c5a6a503eb6d..86bcd9401d41 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -104,6 +104,7 @@ extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);
extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
+void push_current_task(struct rq *rq);
extern int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
--
2.39.3
Powered by blists - more mailing lists