Message-ID: <YIZ6ZpkrMGQ9A9x2@hirez.programming.kicks-ass.net>
Date: Mon, 26 Apr 2021 10:31:34 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Josh Don <joshdon@...gle.com>
Cc: Joel Fernandes <joel@...lfernandes.org>,
"Hyser,Chris" <chris.hyser@...cle.com>,
Ingo Molnar <mingo@...nel.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Valentin Schneider <valentin.schneider@....com>,
Mel Gorman <mgorman@...e.de>,
linux-kernel <linux-kernel@...r.kernel.org>,
Thomas Gleixner <tglx@...utronix.de>
Subject: Re: [PATCH 04/19] sched: Prepare for Core-wide rq->lock
On Fri, Apr 23, 2021 at 06:22:52PM -0700, Josh Don wrote:
> Hi Peter,
>
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -186,12 +186,37 @@ int sysctl_sched_rt_runtime = 950000;
> >
> >  void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
> >  {
> > -	raw_spin_lock_nested(rq_lockp(rq), subclass);
> > +	raw_spinlock_t *lock;
> > +
> > +	if (sched_core_disabled()) {
>
> Nothing to stop sched_core from being enabled right here? Leading to
> us potentially taking the wrong lock.
>
> > +		raw_spin_lock_nested(&rq->__lock, subclass);
> > +		return;
> > +	}
> > +
> > +	for (;;) {
> > +		lock = rq_lockp(rq);
> > +		raw_spin_lock_nested(lock, subclass);
> > +		if (likely(lock == rq_lockp(rq)))
> > +			return;
> > +		raw_spin_unlock(lock);
> > +	}
> >  }
Very good; something like the below seems to be the best I can make of it:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f732642e3e09..1a81e9cc9e5d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -290,6 +290,10 @@ static void sched_core_assert_empty(void)
 static void __sched_core_enable(void)
 {
 	static_branch_enable(&__sched_core_enabled);
+	/*
+	 * Ensure raw_spin_rq_*lock*() have completed before flipping.
+	 */
+	synchronize_rcu();
 	__sched_core_flip(true);
 	sched_core_assert_empty();
 }
@@ -449,16 +453,22 @@ void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 {
 	raw_spinlock_t *lock;
 
+	preempt_disable();
 	if (sched_core_disabled()) {
 		raw_spin_lock_nested(&rq->__lock, subclass);
+		/* preempt *MUST* still be disabled here */
+		preempt_enable_no_resched();
 		return;
 	}
 
 	for (;;) {
 		lock = __rq_lockp(rq);
 		raw_spin_lock_nested(lock, subclass);
-		if (likely(lock == __rq_lockp(rq)))
+		if (likely(lock == __rq_lockp(rq))) {
+			/* preempt *MUST* still be disabled here */
+			preempt_enable_no_resched();
 			return;
+		}
 		raw_spin_unlock(lock);
 	}
 }
@@ -468,14 +478,20 @@ bool raw_spin_rq_trylock(struct rq *rq)
 	raw_spinlock_t *lock;
 	bool ret;
 
-	if (sched_core_disabled())
-		return raw_spin_trylock(&rq->__lock);
+	preempt_disable();
+	if (sched_core_disabled()) {
+		ret = raw_spin_trylock(&rq->__lock);
+		preempt_enable();
+		return ret;
+	}
 
 	for (;;) {
 		lock = __rq_lockp(rq);
 		ret = raw_spin_trylock(lock);
-		if (!ret || (likely(lock == __rq_lockp(rq))))
+		if (!ret || (likely(lock == __rq_lockp(rq)))) {
+			preempt_enable();
 			return ret;
+		}
 		raw_spin_unlock(lock);
 	}
 }
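
For anyone who wants to poke at the locking pattern outside the kernel
tree, here is a minimal user-space sketch of the "take the lock, then
re-check which lock covers the rq" retry loop used above. It assumes C11
atomics and pthread spinlocks as stand-ins for raw_spinlock_t and
__rq_lockp(); struct rq_model, rq_model_lock() and lock_ptr are
illustrative names only, not kernel symbols.

/*
 * Minimal user-space model of the retry pattern in raw_spin_rq_lock_nested():
 * snapshot the covering lock, acquire it, and only keep it if it is still
 * the covering lock once we hold it.  Illustrative names, not kernel APIs.
 */
#include <pthread.h>
#include <stdatomic.h>

struct rq_model {
	pthread_spinlock_t __lock;		/* the per-rq lock */
	_Atomic(pthread_spinlock_t *) lock_ptr;	/* may be re-pointed at a core-wide lock */
};

static void rq_model_lock(struct rq_model *rq)
{
	pthread_spinlock_t *lock;

	for (;;) {
		lock = atomic_load(&rq->lock_ptr);
		pthread_spin_lock(lock);
		if (lock == atomic_load(&rq->lock_ptr))
			return;			/* still the covering lock */
		pthread_spin_unlock(lock);	/* rq was re-pointed; retry */
	}
}

The re-check after pthread_spin_lock() is what makes a concurrent
re-pointing of lock_ptr safe: if the rq was switched to a different lock
while we were spinning, we drop the stale one and go around again.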
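
And a similarly crude sketch of the enable-side ordering that addresses the
window Josh pointed out (sched_core being enabled between the
sched_core_disabled() check and taking &rq->__lock). A plain atomic counter
stands in for "preemption disabled" and a busy-wait stands in for the grace
period, so unlike the real synchronize_rcu() it also waits for lockers that
start after the flip; core_enabled, lockers_in_flight and the *_model()
functions are made-up names for illustration.

/*
 * Crude model of the __sched_core_enable() ordering: set the key, wait out
 * every locker that may have sampled the old value, only then re-point the
 * rq locks.  All names here are illustrative, not kernel APIs.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool core_enabled;	/* stands in for the static key */
static _Atomic int lockers_in_flight;	/* stands in for preempt-disabled lockers */

static void rq_lock_model(void)
{
	atomic_fetch_add(&lockers_in_flight, 1);	/* ~preempt_disable() */
	if (!atomic_load(&core_enabled)) {
		/* take the per-rq lock directly ... */
	} else {
		/* ... or take and re-check the current covering lock */
	}
	atomic_fetch_sub(&lockers_in_flight, 1);	/* ~preempt_enable_no_resched() */
}

static void core_enable_model(void)
{
	atomic_store(&core_enabled, true);		/* ~static_branch_enable() */
	/*
	 * Wait until no locker that might still act on the old value is in
	 * flight; in the patch this is synchronize_rcu() paired with the
	 * preempt_disable()/preempt_enable_no_resched() brackets.
	 */
	while (atomic_load(&lockers_in_flight))
		sched_yield();
	/* now safe to flip the rq->lock pointers (__sched_core_flip(true)) */
}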