Message-Id: <20190212171423.8308-26-frederic@kernel.org>
Date:   Tue, 12 Feb 2019 18:14:16 +0100
From:   Frederic Weisbecker <frederic@...nel.org>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     Frederic Weisbecker <frederic@...nel.org>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        Peter Zijlstra <peterz@...radead.org>,
        Mauro Carvalho Chehab <mchehab@...pensource.com>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        "David S . Miller" <davem@...emloft.net>,
        Thomas Gleixner <tglx@...utronix.de>,
        "Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
        Frederic Weisbecker <fweisbec@...il.com>,
        Pavan Kondeti <pkondeti@...eaurora.org>,
        Ingo Molnar <mingo@...nel.org>,
        Joel Fernandes <joel@...lfernandes.org>
Subject: [PATCH 25/32] softirq: Prepare for mixing all/per-vector masking

In order to mix and nest full and per-vector softirq masking, we need
to track the nesting state with a "full masking" counter and a mask of
individually disabled vectors.

Start by introducing the full masking counter. For now it simply
mirrors softirq_count() because there is no per-vector masking API
yet.

When this full masking counter is non-zero, all softirq vectors are
explicitly disabled.
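
Purely as an illustration (not part of the patch itself), the nesting
behaviour this counter is meant to capture can be sketched as follows;
the function name is hypothetical and only plain
local_bh_disable()/local_bh_enable() callers are assumed:

	#include <linux/bottom_half.h>	/* local_bh_disable()/local_bh_enable() */

	static void softirq_nesting_sketch(void)
	{
		local_bh_disable();	/* disabled_all: 0 -> 1, all vectors masked  */
		local_bh_disable();	/* disabled_all: 1 -> 2, still masked        */
		local_bh_enable();	/* disabled_all: 2 -> 1, still masked        */
		local_bh_enable();	/* disabled_all: 1 -> 0, vectors re-enabled  */
	}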

Suggested-by: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Mauro Carvalho Chehab <mchehab@...pensource.com>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Pavan Kondeti <pkondeti@...eaurora.org>
Cc: Paul E . McKenney <paulmck@...ux.vnet.ibm.com>
Cc: David S . Miller <davem@...emloft.net>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
---
 kernel/softirq.c | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 91dee716e139..4477a03afd94 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,6 +57,12 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+struct softirq_nesting {
+	unsigned int disabled_all;
+};
+
+static DEFINE_PER_CPU(struct softirq_nesting, softirq_nesting);
+
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -118,11 +124,11 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 	 * call the trace_preempt_off later.
 	 */
 	__preempt_count_add(cnt);
-	/*
-	 * Were softirqs turned off above:
-	 */
-	if (softirq_count() == (cnt & SOFTIRQ_MASK))
+
+	if (__this_cpu_inc_return(softirq_nesting.disabled_all) == 1) {
+		softirq_enabled_clear_mask(SOFTIRQ_ALL_MASK);
 		trace_softirqs_off(ip);
+	}
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	raw_local_irq_restore(flags);
@@ -137,6 +143,15 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_disable_ip);
 
+static void local_bh_enable_common(unsigned long ip, unsigned int cnt)
+{
+	if (__this_cpu_dec_return(softirq_nesting.disabled_all))
+		return;
+
+	softirq_enabled_set(SOFTIRQ_ALL_MASK);
+	trace_softirqs_on(ip);
+}
+
 static void __local_bh_enable_no_softirq(unsigned int cnt)
 {
 	lockdep_assert_irqs_disabled();
@@ -144,8 +159,7 @@ static void __local_bh_enable_no_softirq(unsigned int cnt)
 	if (preempt_count() == cnt)
 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 
-	if (softirq_count() == (cnt & SOFTIRQ_MASK))
-		trace_softirqs_on(_RET_IP_);
+	local_bh_enable_common(_RET_IP_, cnt);
 
 	__preempt_count_sub(cnt);
 }
@@ -168,11 +182,8 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 #ifdef CONFIG_TRACE_IRQFLAGS
 	local_irq_disable();
 #endif
-	/*
-	 * Are softirqs going to be turned on now:
-	 */
-	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
-		trace_softirqs_on(ip);
+	local_bh_enable_common(ip, cnt);
+
 	/*
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
-- 
2.17.1
