Message-ID: <20220316210341.GD14330@worktop.programming.kicks-ass.net>
Date:   Wed, 16 Mar 2022 22:03:41 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Steven Rostedt <rostedt@...dmis.org>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        joel@...lfernandes.org
Subject: Re: sched_core_balance() releasing interrupts with pi_lock held

On Wed, Mar 16, 2022 at 09:27:34PM +0100, Peter Zijlstra wrote:
> On Tue, Mar 15, 2022 at 05:46:06PM -0400, Steven Rostedt wrote:
> > On Tue, 8 Mar 2022 16:14:55 -0500
> > Steven Rostedt <rostedt@...dmis.org> wrote:
> > 
> > > Hi Peter,
> > 
> > Have you had time to look into this?
> 
> Not since I talked to you on IRC about it last week.
> 
> Like I wrote, the balance_callback should be run under whichever
> rq->lock instance it gets queued under. As per:
> 
>   565790d28b1e ("sched: Fix balance_callback()")
> 
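(To illustrate that invariant with a toy userspace model -- just a
sketch with made-up names, plain pthreads rather than the kernel's
actual queue_balance_callback() machinery: whoever releases the lock
runs whatever got queued under it, so a callback always executes under
the exact lock instance it was queued under.)

/* toy model of the balance_callback rule */
#include <pthread.h>
#include <stdio.h>

struct cb {
	struct cb *next;
	void (*func)(void);
};

struct toy_rq {
	pthread_mutex_t lock;
	struct cb *callbacks;	/* only ever touched with ->lock held */
};

/* caller must hold rq->lock, like queue_balance_callback() */
static void queue_cb(struct toy_rq *rq, struct cb *cb, void (*func)(void))
{
	cb->func = func;
	cb->next = rq->callbacks;
	rq->callbacks = cb;
}

/*
 * The unlock path runs the queued callbacks while still holding the
 * very lock they were queued under, then drops it -- same idea as
 * finish_lock_switch() running the queued balance callbacks before
 * releasing rq->lock.
 */
static void unlock_rq(struct toy_rq *rq)
{
	while (rq->callbacks) {
		struct cb *cb = rq->callbacks;

		rq->callbacks = cb->next;
		cb->func();
	}
	pthread_mutex_unlock(&rq->lock);
}

static void balance(void)
{
	puts("ran under the lock it was queued under");
}

int main(void)
{
	struct toy_rq rq = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct cb cb;

	pthread_mutex_lock(&rq.lock);
	queue_cb(&rq, &cb, balance);
	unlock_rq(&rq);		/* balance() runs here */

	return 0;
}
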
> Now, we only do queue_core_balance() from set_next_task_idle(), which
> *should* only happen from pick_next_task(), and as such the callback
> should only ever get called from finish_lock_switch() or the 'prev ==
> next' case in __schedule().
> 
> Neither of these two sites holds pi_lock.
> 
> 
> This is about as far as I got explaining things and, it being late,
> about as far as I got looking at things.
> 
> Now, that also makes conceptual sense: we only want to pull a core-cookie
> task when we're scheduling an idle task.
> 
> Now, clearly this gets triggered from the PI path, but that's not making
> immediate sense to me; it would mean we're boosting the idle task, which
> is wrong too.
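
(Mechanically, the only way I can see the PI path getting there is the
tail of rt_mutex_setprio() -- quoting roughly from memory:
rt_mutex_adjust_prio() calls it with the task's ->pi_lock held, and the
balance callbacks then run before that pi_lock is ever dropped:)

	if (queued)
		enqueue_task(rq, p, queue_flag);
	if (running)
		set_next_task(rq, p);	/* set_next_task_idle() when p is rq->idle */

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	/* Avoid rq from going away on us: */
	preempt_disable();

	rq_unpin_lock(rq, &rf);
	__balance_callbacks(rq);	/* would run sched_core_balance() here, pi_lock still held */
	raw_spin_rq_unlock(rq);

	preempt_enable();
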
> 
> So it would be useful for someone who can reproduce this to provide a
> trace of where queue_core_balance() gets called, because that *should*
> only be in __schedule().
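
One quick way for whoever can reproduce this to get that trace (a
sketch, untested): stick a throwaway dump_stack() into
queue_core_balance() and watch dmesg, e.g.:

void queue_core_balance(struct rq *rq)
{
	if (!sched_core_enabled(rq))
		return;

	dump_stack();	/* debug only: print the call chain of every caller */

	/* ... rest of the function unchanged ... */
}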

Does something like the below (untested in the extreme) help? It moves
the queue_core_balance() call out of set_next_task_idle() and into
pick_next_task() itself, guarded by rq->core->core_forceidle_count, so
the callback can only ever be queued from the __schedule() path and
never from a PI-boost set_next_task().

---
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 83872f95a1ea..18163454bb47 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5665,6 +5665,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 
 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 
+static void queue_core_balance(struct rq *rq);
+
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -5714,7 +5716,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		}
 
 		rq->core_pick = NULL;
-		return next;
+		goto out;
 	}
 
 	put_prev_task_balance(rq, prev, rf);
@@ -5764,7 +5766,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 			 */
 			WARN_ON_ONCE(fi_before);
 			task_vruntime_update(rq, next, false);
-			goto done;
+			goto out_set_next;
 		}
 	}
 
@@ -5884,8 +5886,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		resched_curr(rq_i);
 	}
 
-done:
+out_set_next:
 	set_next_task(rq, next);
+out:
+	if (rq->core->core_forceidle_count && next == rq->idle)
+		queue_core_balance(rq);
+
 	return next;
 }
 
@@ -5914,7 +5920,7 @@ static bool try_steal_cookie(int this, int that)
 		if (p == src->core_pick || p == src->curr)
 			goto next;
 
-		if (!cpumask_test_cpu(this, &p->cpus_mask))
+		if (!is_cpu_allowed(p, this))
 			goto next;
 
 		if (p->core_occupation > dst->idle->core_occupation)
@@ -5980,7 +5986,7 @@ static void sched_core_balance(struct rq *rq)
 
 static DEFINE_PER_CPU(struct callback_head, core_balance_head);
 
-void queue_core_balance(struct rq *rq)
+static void queue_core_balance(struct rq *rq)
 {
 	if (!sched_core_enabled(rq))
 		return;
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index d17b0a5ce6ac..314c36fc9c42 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -437,7 +437,6 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 {
 	update_idle_core(rq);
 	schedstat_inc(rq->sched_goidle);
-	queue_core_balance(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index de53be905739..b85c9344779a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1247,8 +1247,6 @@ static inline bool sched_group_cookie_match(struct rq *rq,
 	return false;
 }
 
-extern void queue_core_balance(struct rq *rq);
-
 static inline bool sched_core_enqueued(struct task_struct *p)
 {
 	return !RB_EMPTY_NODE(&p->core_node);
@@ -1282,10 +1280,6 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
 	return &rq->__lock;
 }
 
-static inline void queue_core_balance(struct rq *rq)
-{
-}
-
 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
 {
 	return true;
