Message-ID: <20230525165244.GV83892@hirez.programming.kicks-ass.net>
Date: Thu, 25 May 2023 18:52:44 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: linux-kernel@...r.kernel.org, Ben Segall <bsegall@...gle.com>,
	Daniel Bristot de Oliveira <bristot@...hat.com>,
	Dietmar Eggemann <dietmar.eggemann@....com>,
	Ingo Molnar <mingo@...hat.com>,
	Juri Lelli <juri.lelli@...hat.com>,
	Mel Gorman <mgorman@...e.de>,
	Steven Rostedt <rostedt@...dmis.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Valentin Schneider <vschneid@...hat.com>,
	Vincent Guittot <vincent.guittot@...aro.org>
Subject: Re: [PATCH] sched: Consider task_struct::saved_state in
	wait_task_inactive().

On Fri, Feb 17, 2023 at 03:53:02PM +0100, Sebastian Andrzej Siewior wrote:
> +static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state)
> +{
> +	unsigned long flags;
> +	bool mismatch;
> +
> +	raw_spin_lock_irqsave(&p->pi_lock, flags);
> +	if (READ_ONCE(p->__state) & match_state)
> +		mismatch = false;
> +	else if (READ_ONCE(p->saved_state) & match_state)
> +		mismatch = false;
> +	else
> +		mismatch = true;
> +
> +	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
> +	return mismatch;
> +}
> +static __always_inline bool state_match(struct task_struct *p, unsigned int match_state,
> +					bool *wait)
> +{
> +	if (READ_ONCE(p->__state) & match_state)
> +		return true;
> +	if (READ_ONCE(p->saved_state) & match_state) {
> +		*wait = true;
> +		return true;
> +	}
> +	return false;
> +}
> +#else
> +static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state)
> +{
> +	return !(READ_ONCE(p->__state) & match_state);
> +}
> +static __always_inline bool state_match(struct task_struct *p, unsigned int match_state,
> +					bool *wait)
> +{
> +	return (READ_ONCE(p->__state) & match_state);
> +}
> +#endif
> +
>  /*
>   * wait_task_inactive - wait for a thread to unschedule.
>   *

Urgh...

I've ended up with the below. I've tried folding it with
ttwu_state_match(), but every attempt so far makes it an unholy mess.

Now, if only we had a proper lock guard then we could drop another few
lines, but alas.
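(Illustrative only: a minimal sketch of how a scope-based guard would
shrink the wti_state_match() from the patch below. guard() here is
hypothetical shorthand in the style of what later landed as
include/linux/cleanup.h; no such primitive existed in the tree at the
time.)

static __always_inline bool wti_state_match(struct task_struct *p, unsigned int state)
{
#ifdef CONFIG_PREEMPT_RT
	/* pi_lock is dropped automatically when the scope is left */
	guard(raw_spinlock_irq)(&p->pi_lock);
#endif
	return __wti_state_match(p, state, NULL);
}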
---
 kernel/sched/core.c | 35 +++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a68d1276bab0..5a106629a98d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3341,6 +3341,37 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
+static __always_inline
+bool __wti_state_match(struct task_struct *p, unsigned int state, int *queued)
+{
+	if (READ_ONCE(p->__state) & state)
+		return true;
+
+#ifdef CONFIG_PREEMPT_RT
+	if (READ_ONCE(p->saved_state) & state) {
+		if (queued)
+			*queued = 1;
+		return true;
+	}
+#endif
+	return false;
+}
+
+static __always_inline bool wti_state_match(struct task_struct *p, unsigned int state)
+{
+#ifdef CONFIG_PREEMPT_RT
+	bool match;
+
+	raw_spin_lock_irq(&p->pi_lock);
+	match = __wti_state_match(p, state, NULL);
+	raw_spin_unlock_irq(&p->pi_lock);
+
+	return match;
+#else
+	return __wti_state_match(p, state, NULL);
+#endif
+}
+
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
@@ -3385,7 +3416,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
 		 * is actually now running somewhere else!
 		 */
 		while (task_on_cpu(rq, p)) {
-			if (!(READ_ONCE(p->__state) & match_state))
+			if (!wti_state_match(p, match_state))
 				return 0;
 			cpu_relax();
 		}
@@ -3400,7 +3431,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
 		running = task_on_cpu(rq, p);
 		queued = task_on_rq_queued(p);
 		ncsw = 0;
-		if (READ_ONCE(p->__state) & match_state)
+		if (__wti_state_match(p, match_state, &queued))
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
 		task_rq_unlock(rq, p, &rf);
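For readers without the PREEMPT_RT background: under RT, spinlock_t is a
sleeping lock, so a task that blocks on one has its original state
parked in p->saved_state while p->__state becomes TASK_RTLOCK_WAIT; a
matcher that only inspects __state would wrongly report "not in the
requested state". Passing &queued in the second hunk presumably serves
the same purpose as the *wait flag in Sebastian's version: a
saved_state match means the task will shortly run again, so the
wait-and-retry path is forced rather than reporting it inactive. Below
is a minimal userspace model of the two-field check (the struct and
main() are illustrative, not the kernel's; locking is omitted, whereas
the real code holds p->pi_lock to read the pair consistently):

#include <stdbool.h>
#include <stdio.h>

#define TASK_UNINTERRUPTIBLE	0x0002
#define TASK_RTLOCK_WAIT	0x1000

struct task {
	unsigned int state;		/* models p->__state */
	unsigned int saved_state;	/* models p->saved_state */
};

/* Models __wti_state_match(): match against either field. */
static bool state_match(const struct task *t, unsigned int match)
{
	if (t->state & match)
		return true;
	if (t->saved_state & match)	/* the PREEMPT_RT case */
		return true;
	return false;
}

int main(void)
{
	/* A task that blocked on an RT "spinlock" while uninterruptible. */
	struct task t = {
		.state		= TASK_RTLOCK_WAIT,
		.saved_state	= TASK_UNINTERRUPTIBLE,
	};

	/* __state alone misses the match (prints 0)... */
	printf("%d\n", !!(t.state & TASK_UNINTERRUPTIBLE));
	/* ...while the two-field check succeeds (prints 1). */
	printf("%d\n", state_match(&t, TASK_UNINTERRUPTIBLE));
	return 0;
}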