Message-Id: <20231107215742.363031-40-ankur.a.arora@oracle.com>
Date: Tue, 7 Nov 2023 13:57:25 -0800
From: Ankur Arora <ankur.a.arora@...cle.com>
To: linux-kernel@...r.kernel.org
Cc: tglx@...utronix.de, peterz@...radead.org,
torvalds@...ux-foundation.org, paulmck@...nel.org,
linux-mm@...ck.org, x86@...nel.org, akpm@...ux-foundation.org,
luto@...nel.org, bp@...en8.de, dave.hansen@...ux.intel.com,
hpa@...or.com, mingo@...hat.com, juri.lelli@...hat.com,
vincent.guittot@...aro.org, willy@...radead.org, mgorman@...e.de,
jon.grimm@....com, bharata@....com, raghavendra.kt@....com,
boris.ostrovsky@...cle.com, konrad.wilk@...cle.com,
jgross@...e.com, andrew.cooper3@...rix.com, mingo@...nel.org,
bristot@...nel.org, mathieu.desnoyers@...icios.com,
geert@...ux-m68k.org, glaubitz@...sik.fu-berlin.de,
anton.ivanov@...bridgegreys.com, mattst88@...il.com,
krypton@...ich-teichert.org, rostedt@...dmis.org,
David.Laight@...LAB.COM, richard@....at, mjguzik@...il.com,
Ankur Arora <ankur.a.arora@...cle.com>
Subject: [RFC PATCH 39/86] sched: handle lazy resched in set_nr_*_polling()
To trigger a reschedule on a target runqueue, a few things need
to happen first (see the sketch after the list):
1. set_tsk_need_resched(target_rq->curr, RESCHED_eager)
2. ensure that the target CPU sees the need-resched bit
3. preempt_fold_need_resched()
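For reference, a minimal sketch of how the three steps combine on the
eager path (illustrative only, not part of this patch; the locking and
fast-path handling that resched_curr() does is omitted, and
set_nr_and_not_polling() here is the updated variant introduced below):

    /* Illustrative sketch, not part of this patch. */
    static void resched_curr_eager_sketch(struct rq *rq)
    {
            struct task_struct *curr = rq->curr;
            int cpu = cpu_of(rq);

            /* 1. Mark the running task as needing an eager reschedule. */
            set_tsk_need_resched(curr, RESCHED_eager);

            /*
             * 2. Ensure the target CPU sees the bit: if it is not
             * polling on TIF_POLLING_NRFLAG, kick it with an IPI.
             */
            if (set_nr_and_not_polling(curr, RESCHED_eager))
                    smp_send_reschedule(cpu);

            /*
             * 3. On the target, the scheduler-IPI path calls
             * preempt_fold_need_resched(), so preemption happens
             * without waiting for the next tick.
             */
    }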
Most of this is done via some combination of resched_curr(),
set_nr_if_polling(), and set_nr_and_not_polling().
Update the last two to also handle TIF_NEED_RESCHED_LAZY.
One thing to note is that TIF_NEED_RESCHED_LAZY has run-to-completion
semantics, so unlike TIF_NEED_RESCHED, we don't need to ensure that
the target CPU sees it promptly, and of course there is no preempt
folding.
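To illustrate that difference (a hypothetical helper, not from this
series): the lazy bit only needs to be visible by the time the task
next passes a scheduling point such as exit to user mode, so simply
setting it is enough:

    /* Illustrative sketch, not part of this patch. */
    static inline bool exit_to_user_needs_resched_sketch(struct thread_info *ti)
    {
            unsigned long flags = READ_ONCE(ti->flags);

            /*
             * TIF_NEED_RESCHED may be folded into the preempt count and
             * can trigger preemption as soon as it is safe; the lazy bit
             * is only acted on at a scheduling point like this one, so
             * no IPI or preempt folding is needed when setting it.
             */
            return flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY);
    }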
Originally-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Ankur Arora <ankur.a.arora@...cle.com>
---
kernel/sched/core.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e2215c417323..01df5ac2982c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -892,14 +892,15 @@ static inline void hrtick_rq_init(struct rq *rq)
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
- * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * Atomically set TIF_NEED_RESCHED[_LAZY] and test for TIF_POLLING_NRFLAG,
* this avoids any races wrt polling state changes and thereby avoids
* spurious IPIs.
*/
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, resched_t rs)
{
struct thread_info *ti = task_thread_info(p);
- return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+
+ return !(fetch_or(&ti->flags, _tif_resched(rs)) & _TIF_POLLING_NRFLAG);
}
/*
@@ -916,7 +917,7 @@ static bool set_nr_if_polling(struct task_struct *p)
for (;;) {
if (!(val & _TIF_POLLING_NRFLAG))
return false;
- if (val & _TIF_NEED_RESCHED)
+ if (val & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
return true;
if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
break;
@@ -925,9 +926,9 @@ static bool set_nr_if_polling(struct task_struct *p)
}
#else
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, resched_t rs)
{
- set_tsk_need_resched(p, RESCHED_eager);
+ set_tsk_need_resched(p, rs);
return true;
}
@@ -1050,7 +1051,7 @@ void resched_curr(struct rq *rq)
return;
}
- if (set_nr_and_not_polling(curr))
+ if (set_nr_and_not_polling(curr, RESCHED_eager))
smp_send_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
@@ -1126,7 +1127,7 @@ static void wake_up_idle_cpu(int cpu)
if (cpu == smp_processor_id())
return;
- if (set_nr_and_not_polling(rq->idle))
+ if (set_nr_and_not_polling(rq->idle, RESCHED_eager))
smp_send_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
--
2.31.1