Message-ID: <20240904111223.1035-4-kprateek.nayak@amd.com>
Date: Wed, 4 Sep 2024 11:12:21 +0000
From: K Prateek Nayak <kprateek.nayak@....com>
To: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
	Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot <vincent.guittot@...aro.org>,
	Dietmar Eggemann <dietmar.eggemann@....com>, Steven Rostedt <rostedt@...dmis.org>,
	Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
	Valentin Schneider <vschneid@...hat.com>, Thomas Gleixner <tglx@...utronix.de>
CC: Leonardo Bras <leobras@...hat.com>, "Paul E. McKenney" <paulmck@...nel.org>,
	Rik van Riel <riel@...riel.com>, Thorsten Blum <thorsten.blum@...lux.com>,
	Zqiang <qiang.zhang1211@...il.com>, Tejun Heo <tj@...nel.org>,
	Lai Jiangshan <jiangshanlai@...il.com>, Caleb Sander Mateos <csander@...estorage.com>,
	<linux-kernel@...r.kernel.org>, K Prateek Nayak <kprateek.nayak@....com>,
	"Gautham R . Shenoy" <gautham.shenoy@....com>, Chen Yu <yu.c.chen@...el.com>,
	Julia Lawall <Julia.Lawall@...ia.fr>, Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [RFC PATCH v2 3/5] softirq: Mask reads of softirq_ctrl.cnt with SOFTIRQ_MASK for PREEMPT_RT

On PREEMPT_RT kernels, softirq_ctrl.cnt tracks the softirq count and sets
task::softirq_disable_cnt accordingly. This count is always 'changed by
SOFTIRQ_OFFSET when entering and leaving softirq processing' or by
'SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) on local_bh_disable or
local_bh_enable', as stated in the comment above the softirq_ctrl
declaration.
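
For reference, a condensed sketch of the relevant constants (paraphrased
from include/linux/preempt.h, assuming the usual
PREEMPT_BITS = SOFTIRQ_BITS = 8 layout):

  #define SOFTIRQ_SHIFT          8                          /* bits 8-15 */
  #define SOFTIRQ_OFFSET         (1UL << SOFTIRQ_SHIFT)     /* 0x0100    */
  #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)       /* 0x0200    */
  #define SOFTIRQ_MASK           (0xffUL << SOFTIRQ_SHIFT)  /* 0xff00    */

Because every update is a multiple of SOFTIRQ_OFFSET, the bits below
SOFTIRQ_SHIFT are always zero today, which is what makes the masking
introduced below a no-op on current PREEMPT_RT kernels.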

Soon, softirq_ctrl.cnt will also be used on !PREEMPT_RT kernels to track
an impending call to do_softirq() in order to prevent a pointless wakeup
of ksoftirqd; this tracking will use the lower bits of softirq_ctrl.cnt.

Mask all current reads of softirq_ctrl.cnt on PREEMPT_RT kernels with
SOFTIRQ_MASK so that only the SOFTIRQ_OFFSET-based multiples of the count
are observed.
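
As a quick sanity check of the arithmetic, a minimal userspace sketch
(not kernel code; the constants are re-derived locally and the low flag
bit is purely hypothetical, standing in for the future do_softirq()
tracking):

  #include <assert.h>

  #define SOFTIRQ_SHIFT          8
  #define SOFTIRQ_OFFSET         (1U << SOFTIRQ_SHIFT)
  #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
  #define SOFTIRQ_MASK           (0xffU << SOFTIRQ_SHIFT)

  int main(void)
  {
          unsigned int cnt = 0;

          cnt += SOFTIRQ_DISABLE_OFFSET;  /* local_bh_disable()        */
          cnt |= 0x1;                     /* hypothetical low-bit flag */

          /* A masked read sees only the SOFTIRQ_OFFSET-based count. */
          assert((cnt & SOFTIRQ_MASK) == SOFTIRQ_DISABLE_OFFSET);

          cnt -= SOFTIRQ_DISABLE_OFFSET;  /* local_bh_enable()         */
          assert((cnt & SOFTIRQ_MASK) == 0);  /* flag bit is ignored   */

          return 0;
  }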

No functional change intended.

Signed-off-by: K Prateek Nayak <kprateek.nayak@....com>
---
v1..v2:

- New patch. Broken off from approach discussed in
  https://lore.kernel.org/lkml/880f13fd-753d-2c5a-488a-d75c99e8dfa3@amd.com/
---
 kernel/softirq.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8c4524ce65fa..e70a51d737ee 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -138,7 +138,7 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
  */
 bool local_bh_blocked(void)
 {
-	return __this_cpu_read(softirq_ctrl.cnt) != 0;
+	return (__this_cpu_read(softirq_ctrl.cnt) & SOFTIRQ_MASK) != 0;
 }
 
 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
@@ -155,7 +155,8 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 			/* Required to meet the RCU bottomhalf requirements. */
 			rcu_read_lock();
 		} else {
-			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
+			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt) &
+					    SOFTIRQ_MASK);
 		}
 	}
 
@@ -163,7 +164,7 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 	 * Track the per CPU softirq disabled state. On RT this is per CPU
 	 * state to allow preemption of bottom half disabled sections.
 	 */
-	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
+	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt) & SOFTIRQ_MASK;
 	/*
 	 * Reflect the result in the task state to prevent recursion on the
 	 * local lock and to make softirq_count() & al work.
@@ -184,7 +185,7 @@ static void __local_bh_enable(unsigned int cnt, bool unlock)
 	int newcnt;
 
 	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
-			    this_cpu_read(softirq_ctrl.cnt));
+			    (this_cpu_read(softirq_ctrl.cnt) & SOFTIRQ_MASK));
 
 	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
 		raw_local_irq_save(flags);
@@ -192,7 +193,7 @@ static void __local_bh_enable(unsigned int cnt, bool unlock)
 		raw_local_irq_restore(flags);
 	}
 
-	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
+	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt) & SOFTIRQ_MASK;
 	current->softirq_disable_cnt = newcnt;
 
 	if (!newcnt && unlock) {
@@ -212,7 +213,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 	lockdep_assert_irqs_enabled();
 
 	local_irq_save(flags);
-	curcnt = __this_cpu_read(softirq_ctrl.cnt);
+	curcnt = __this_cpu_read(softirq_ctrl.cnt) & SOFTIRQ_MASK;
 
 	/*
 	 * If this is not reenabling soft interrupts, no point in trying to
-- 
2.34.1