Message-Id: <20240528003521.979836-22-ankur.a.arora@oracle.com>
Date: Mon, 27 May 2024 17:35:07 -0700
From: Ankur Arora <ankur.a.arora@...cle.com>
To: linux-kernel@...r.kernel.org
Cc: tglx@...utronix.de, peterz@...radead.org, torvalds@...ux-foundation.org,
paulmck@...nel.org, rostedt@...dmis.org, mark.rutland@....com,
juri.lelli@...hat.com, joel@...lfernandes.org, raghavendra.kt@....com,
sshegde@...ux.ibm.com, boris.ostrovsky@...cle.com,
konrad.wilk@...cle.com, Ankur Arora <ankur.a.arora@...cle.com>,
Ingo Molnar <mingo@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>
Subject: [PATCH v2 21/35] sched: prepare for lazy rescheduling in resched_curr()

Handle RESCHED_LAZY in resched_curr() by registering an intent to
reschedule at exit-to-user.
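
For context, the lazy bit is only acted upon at the exit-to-user
boundary. A minimal sketch of that check (a reconstruction, not the
series code -- the actual wiring lands in the entry/exit patches, and
the helper name here is hypothetical):

  /*
   * Sketch: both need-resched bits force a schedule() before the
   * return to userspace; in-kernel preemption points only look at
   * TIF_NEED_RESCHED.
   */
  static void exit_to_user_resched(unsigned long ti_work)
  {
          if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
                  schedule();
  }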

Since a lazy reschedule is not imminent, skip the preempt folding and
the resched IPI.
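
("Preempt folding" here means folding the need-resched state into the
preempt count so that the preempt_enable() fast path can test a single
word. Simplified from include/linux/preempt.h for preemptible builds:)

  /*
   * On arches that fold (e.g. x86), preempt_count_dec_and_test()
   * only fires when the count drops to zero *and* need-resched was
   * folded in. A lazy request skips the fold, so this fast path
   * stays quiet and the task runs on until the next exit-to-user.
   */
  #define preempt_enable() \
  do { \
          barrier(); \
          if (unlikely(preempt_count_dec_and_test())) \
                  __preempt_schedule(); \
  } while (0)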

Also, update set_nr_and_not_polling() to handle RESCHED_LAZY. Note that
set_nr_if_polling() is unchanged, since lazy rescheduling is not
meaningful for the idle task.
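
The resched_t type and the _tif_resched() helper used below come from
earlier patches in this series. Roughly (a reconstruction for
readability, not the exact definitions):

  /* Which need-resched bit a reschedule request targets. */
  typedef enum {
          RESCHED_NOW,    /* TIF_NEED_RESCHED: reschedule at the
                             next safe preemption point */
          RESCHED_LAZY,   /* TIF_NEED_RESCHED_LAZY: reschedule at
                             the next exit-to-user */
  } resched_t;

  /* Map a resched_t to the corresponding thread-info flag mask. */
  static inline unsigned long _tif_resched(resched_t rs)
  {
          return rs == RESCHED_NOW ? _TIF_NEED_RESCHED
                                   : _TIF_NEED_RESCHED_LAZY;
  }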

And finally, now that there are two need-resched bits, enforce a
priority order when setting them: TIF_NEED_RESCHED always takes
precedence over TIF_NEED_RESCHED_LAZY.
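
Concretely: a pending TIF_NEED_RESCHED makes any further request a
no-op, while a pending TIF_NEED_RESCHED_LAZY only absorbs further lazy
requests and can still be upgraded to TIF_NEED_RESCHED. A sketch of
that test, as a hypothetical helper mirroring the early return in
resched_curr() below:

  /* Is a new request already covered by a pending bit? */
  static inline bool resched_covered(struct task_struct *p, resched_t rs)
  {
          /* TIF_NEED_RESCHED is the higher priority bit. */
          if (__test_tsk_need_resched(p, RESCHED_NOW))
                  return true;

          /* The lazy bit only covers other lazy requests. */
          return rs == RESCHED_LAZY &&
                 __test_tsk_need_resched(p, RESCHED_LAZY);
  }
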
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Juri Lelli <juri.lelli@...hat.com>
Cc: Vincent Guittot <vincent.guittot@...aro.org>
Originally-by: Thomas Gleixner <tglx@...utronix.de>
Link: https://lore.kernel.org/lkml/87jzshhexi.ffs@tglx/
Signed-off-by: Ankur Arora <ankur.a.arora@...cle.com>
---
 kernel/sched/core.c | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df8e333f2d8b..27b908cc9134 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -899,14 +899,14 @@ static inline void hrtick_rq_init(struct rq *rq)
 
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
- * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * Atomically set TIF_NEED_RESCHED[_LAZY] and test for TIF_POLLING_NRFLAG,
  * this avoids any races wrt polling state changes and thereby avoids
  * spurious IPIs.
  */
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, resched_t rs)
 {
         struct thread_info *ti = task_thread_info(p);
-        return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+        return !(fetch_or(&ti->flags, _tif_resched(rs)) & _TIF_POLLING_NRFLAG);
 }
 
 /*
@@ -931,9 +931,9 @@ static bool set_nr_if_polling(struct task_struct *p)
 }
 #else
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, resched_t rs)
 {
-        __set_tsk_need_resched(p, RESCHED_NOW);
+        __set_tsk_need_resched(p, rs);
         return true;
 }
@@ -1041,25 +1041,34 @@ void wake_up_q(struct wake_q_head *head)
 void resched_curr(struct rq *rq)
 {
         struct task_struct *curr = rq->curr;
+        resched_t rs = RESCHED_NOW;
         int cpu;
 
         lockdep_assert_rq_held(rq);
 
-        if (__test_tsk_need_resched(curr, RESCHED_NOW))
+        /*
+         * TIF_NEED_RESCHED is the higher priority bit, so if it is already
+         * set, nothing more to be done.
+         */
+        if (__test_tsk_need_resched(curr, RESCHED_NOW) ||
+            (rs == RESCHED_LAZY && __test_tsk_need_resched(curr, RESCHED_LAZY)))
                 return;
 
         cpu = cpu_of(rq);
 
         if (cpu == smp_processor_id()) {
-                __set_tsk_need_resched(curr, RESCHED_NOW);
-                set_preempt_need_resched();
+                __set_tsk_need_resched(curr, rs);
+                if (rs == RESCHED_NOW)
+                        set_preempt_need_resched();
                 return;
         }
 
-        if (set_nr_and_not_polling(curr))
-                smp_send_reschedule(cpu);
-        else
+        if (set_nr_and_not_polling(curr, rs)) {
+                if (rs == RESCHED_NOW)
+                        smp_send_reschedule(cpu);
+        } else {
                 trace_sched_wake_idle_without_ipi(cpu);
+        }
 }
 
 void resched_cpu(int cpu)
@@ -1154,7 +1163,7 @@ static void wake_up_idle_cpu(int cpu)
         * and testing of the above solutions didn't appear to report
         * much benefits.
         */
-        if (set_nr_and_not_polling(rq->idle))
+        if (set_nr_and_not_polling(rq->idle, RESCHED_NOW))
                 smp_send_reschedule(cpu);
         else
                 trace_sched_wake_idle_without_ipi(cpu);
@@ -6704,6 +6713,8 @@ static void __sched notrace __schedule(unsigned int sched_mode)
         }
 
         next = pick_next_task(rq, prev, &rf);
+
+        /* Clear both TIF_NEED_RESCHED and TIF_NEED_RESCHED_LAZY */
         clear_tsk_need_resched(prev);
         clear_preempt_need_resched();
 #ifdef CONFIG_SCHED_DEBUG
--
2.31.1