Message-ID: <20250904041516.3046-18-kprateek.nayak@amd.com>
Date: Thu, 4 Sep 2025 04:15:13 +0000
From: K Prateek Nayak <kprateek.nayak@....com>
To: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
	Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot
	<vincent.guittot@...aro.org>, Anna-Maria Behnsen <anna-maria@...utronix.de>,
	Frederic Weisbecker <frederic@...nel.org>, Thomas Gleixner
	<tglx@...utronix.de>, <linux-kernel@...r.kernel.org>
CC: Dietmar Eggemann <dietmar.eggemann@....com>, Steven Rostedt
	<rostedt@...dmis.org>, Ben Segall <bsegall@...gle.com>, Mel Gorman
	<mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>, K Prateek Nayak
	<kprateek.nayak@....com>, "Gautham R. Shenoy" <gautham.shenoy@....com>,
	Swapnil Sapkal <swapnil.sapkal@....com>
Subject: [RFC PATCH 17/19] sched/fair: Remove "nohz.idle_cpus_mask"

All users of "nohz.idle_cpus_mask" have been converted to the
distributed nohz idle tracking based on "nohz_shared_list". Get rid of
the centralized tracking that relied on "nohz.idle_cpus_mask".
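
For illustration only (not part of the patch below), a rough sketch of
the before/after tracking scheme. The "struct nohz_shared" layout, its
field names, and the "nohz_shared_list" usage are assumptions based on
the naming used in this series, not the actual definitions:

	/*
	 * Illustrative sketch -- not kernel code from this series.
	 *
	 * Old scheme: one global mask, updated by every CPU entering or
	 * leaving nohz idle, so all CPUs contend on the same cacheline.
	 */
	static struct {
		cpumask_var_t idle_cpus_mask;	/* removed by this patch */
		atomic_t nr_cpus;
	} nohz;

	/*
	 * New scheme (assumed shape): idle CPUs are tracked in per-domain
	 * structures chained on a list, so the ILB walks nohz_shared_list
	 * instead of scanning a single global cpumask.
	 */
	struct nohz_shared {
		struct list_head node;		/* entry on nohz_shared_list */
		cpumask_var_t idle_cpus_mask;	/* idle CPUs in this domain */
	};
	static LIST_HEAD(nohz_shared_list);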

Signed-off-by: K Prateek Nayak <kprateek.nayak@....com>
---
 kernel/sched/fair.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c7ac8e7094ed..5b693bd0fab4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7169,7 +7169,6 @@ static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask);
 #ifdef CONFIG_NO_HZ_COMMON
 
 static struct {
-	cpumask_var_t idle_cpus_mask;
 	atomic_t nr_cpus;
 	int has_blocked;		/* Idle CPUS has blocked load */
 	int needs_update;		/* Newly idle CPUs need their next_balance collated */
@@ -12517,7 +12516,6 @@ void nohz_balance_exit_idle(struct rq *rq)
 		return;
 
 	WRITE_ONCE(rq->nohz_tick_stopped, 0);
-	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
 	atomic_dec(&nohz.nr_cpus);
 
 	set_cpu_sd_state_busy(rq->cpu);
@@ -12576,8 +12574,9 @@ void nohz_balance_enter_idle(int cpu)
 	/*
 	 * The tick is still stopped but load could have been added in the
 	 * meantime. We set the nohz.has_blocked flag to trig a check of the
-	 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
-	 * of nohz.has_blocked can only happen after checking the new load
+	 * *_avg. The CPU is already part of sd_nohz->idle_cpus_mask so the
+	 * clear of nohz.has_blocked can only happen after checking the new
+	 * load
 	 */
 	if (READ_ONCE(rq->nohz_tick_stopped))
 		goto out;
@@ -12588,7 +12587,6 @@ void nohz_balance_enter_idle(int cpu)
 
 	WRITE_ONCE(rq->nohz_tick_stopped, 1);
 
-	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
 	atomic_inc(&nohz.nr_cpus);
 
 	set_cpu_sd_state_idle(cpu);
@@ -12832,15 +12830,15 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
  * entering idle state. Here we run ILB directly without issuing IPIs.
  *
  * Note that when this function is called, the tick may not yet be stopped on
- * this CPU yet. nohz.idle_cpus_mask is updated only when tick is stopped and
- * cleared on the next busy tick. In other words, nohz.idle_cpus_mask updates
- * don't align with CPUs enter/exit idle to avoid bottlenecks due to high idle
- * entry/exit rate (usec). So it is possible that _nohz_idle_balance() is
- * called from this function on (this) CPU that's not yet in the mask. That's
- * OK because the goal of nohz_run_idle_balance() is to run ILB only for
- * updating the blocked load of already idle CPUs without waking up one of
- * those idle CPUs and outside the preempt disable / IRQ off phase of the local
- * cpu about to enter idle, because it can take a long time.
+ * this CPU yet. sd_nohz->idle_cpus_mask is updated only when tick is stopped
+ * and cleared on the next busy tick. In other words, sd_nohz->idle_cpus_mask
+ * updates don't align with CPUs enter/exit idle to avoid bottlenecks due to
+ * high idle entry/exit rate (usec). So it is possible that
+ * _nohz_idle_balance() is called from this function on (this) CPU that's not
+ * yet in the mask. That's OK because the goal of nohz_run_idle_balance() is to
+ * run ILB only for updating the blocked load of already idle CPUs without
+ * waking up one of those idle CPUs and outside the preempt disable / IRQ off
+ * phase of the local cpu about to enter idle, because it can take a long time.
  */
 void nohz_run_idle_balance(int cpu)
 {
@@ -13841,6 +13839,5 @@ __init void init_sched_fair_class(void)
 #ifdef CONFIG_NO_HZ_COMMON
 	nohz.next_balance = jiffies;
 	nohz.next_blocked = jiffies;
-	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
 #endif
 }
-- 
2.34.1

