Date:   Thu, 11 Oct 2018 01:12:12 +0200
From:   Frederic Weisbecker <frederic@...nel.org>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     Frederic Weisbecker <frederic@...nel.org>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        Peter Zijlstra <peterz@...radead.org>,
        "David S . Miller" <davem@...emloft.net>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        "Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
        Ingo Molnar <mingo@...nel.org>,
        Frederic Weisbecker <fweisbec@...il.com>,
        Mauro Carvalho Chehab <mchehab@...pensource.com>
Subject: [RFC PATCH 25/30] softirq: Push down softirq mask to __local_bh_disable_ip()

Now that all callers are ready, we can push the softirq enabled mask
down to the core from callers such as spin_lock_bh(), local_bh_disable(),
rcu_read_lock_bh(), etc.

The mask is applied to the CPU's softirq enabled vector in
__local_bh_disable_ip(), which returns the old value so that it can be
restored by __local_bh_enable_ip().
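
As an illustration of the intended usage pattern (a minimal userspace
sketch, not kernel code: the SOFTIRQ_ALL_MASK value and the plain
global standing in for the per-CPU enabled vector are assumptions),
the save/restore discipline looks like this:

	#include <assert.h>

	#define SOFTIRQ_ALL_MASK	0x3ffu	/* assumed: one bit per softirq vector */

	/* Toy stand-in for the per-CPU softirq enabled vector. */
	static unsigned int softirq_enabled = SOFTIRQ_ALL_MASK;

	/* Disable the vectors in @mask, return the previous enabled state. */
	static unsigned int local_bh_disable(unsigned int mask)
	{
		unsigned int old = softirq_enabled;

		softirq_enabled &= ~mask;	/* the effect of softirq_enabled_nand() */
		return old;
	}

	/* Restore the state saved by the paired local_bh_disable(). */
	static void local_bh_enable(unsigned int bh)
	{
		softirq_enabled = bh;
	}

	int main(void)
	{
		unsigned int outer = local_bh_disable(SOFTIRQ_ALL_MASK);
		unsigned int inner = local_bh_disable(0x1);	/* nesting is safe */

		local_bh_enable(inner);
		local_bh_enable(outer);
		assert(softirq_enabled == SOFTIRQ_ALL_MASK);
		return 0;
	}

Nesting works because each disable/enable pair restores exactly the
state it saved, rather than unconditionally re-enabling everything.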

Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: David S. Miller <davem@...emloft.net>
Cc: Mauro Carvalho Chehab <mchehab@...pensource.com>
Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
---
 include/linux/bottom_half.h      | 25 ++++++++++++++++---------
 include/linux/rwlock_api_smp.h   | 14 ++++++++------
 include/linux/spinlock_api_smp.h | 10 +++++-----
 kernel/softirq.c                 | 28 +++++++++++++++++++---------
 4 files changed, 48 insertions(+), 29 deletions(-)

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 31fcdae..f8a68c8 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -37,9 +37,16 @@ enum
 
 
 #ifdef CONFIG_TRACE_IRQFLAGS
-extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+extern unsigned int __local_bh_disable_ip(unsigned long ip, unsigned int cnt,
+					  unsigned int mask);
 #else
-static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+static __always_inline unsigned int __local_bh_disable_ip(unsigned long ip,
+							   unsigned int cnt,
+							   unsigned int mask)
 {
+	unsigned int enabled = local_softirq_enabled();
+
+	softirq_enabled_nand(mask);
 	preempt_count_add(cnt);
 	barrier();
+	return enabled;
@@ -48,21 +55,21 @@ static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int
 
 static inline unsigned int local_bh_disable(unsigned int mask)
 {
-	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
-	return 0;
+	return __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET, mask);
 }
 
-extern void local_bh_enable_no_softirq(void);
-extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+extern void local_bh_enable_no_softirq(unsigned int bh);
+extern void __local_bh_enable_ip(unsigned long ip,
+				 unsigned int cnt, unsigned int bh);
 
-static inline void local_bh_enable_ip(unsigned long ip)
+static inline void local_bh_enable_ip(unsigned long ip, unsigned int bh)
 {
-	__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
+	__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET, bh);
 }
 
 static inline void local_bh_enable(unsigned int bh)
 {
-	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET, bh);
 }
 
 extern void local_bh_disable_all(void);
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index fb66489..90ba7bf 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -173,10 +173,11 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
 static inline unsigned int __raw_read_lock_bh(rwlock_t *lock,
 					      unsigned int mask)
 {
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	unsigned int bh;
+	bh = __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET, mask);
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
 	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
-	return 0;
+	return bh;
 }
 
 static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
@@ -202,10 +203,11 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
 static inline unsigned int  __raw_write_lock_bh(rwlock_t *lock,
 						unsigned int mask)
 {
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	unsigned int bh;
+	bh = __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET, mask);
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
-	return 0;
+	return bh;
 }
 
 static inline void __raw_write_lock(rwlock_t *lock)
@@ -253,7 +255,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock,
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_read_unlock(lock);
-	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET, bh);
 }
 
 static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
@@ -278,7 +280,7 @@ static inline void __raw_write_unlock_bh(rwlock_t *lock,
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_write_unlock(lock);
-	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET, bh);
 }
 
 #endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42bbf68..6602a56 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -132,9 +132,9 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
 
 static inline unsigned int __raw_spin_lock_bh(raw_spinlock_t *lock, unsigned int mask)
 {
-	unsigned int bh = 0;
+	unsigned int bh;
 
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	bh = __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET, mask);
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 
@@ -179,19 +179,19 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock,
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_spin_unlock(lock);
-	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET, bh);
 }
 
 static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock,
 					unsigned int *bh,
 					unsigned int mask)
 {
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	*bh = __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET, mask);
 	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
-	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET, *bh);
 	return 0;
 }
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 22cc0a7..e2435b0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -107,13 +107,16 @@ static bool ksoftirqd_running(unsigned long pending)
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+unsigned int __local_bh_disable_ip(unsigned long ip, unsigned int cnt,
+				   unsigned int mask)
 {
 	unsigned long flags;
+	unsigned int enabled;
 
 	WARN_ON_ONCE(in_irq());
 
 	raw_local_irq_save(flags);
+
 	/*
 	 * The preempt tracer hooks into preempt_count_add and will break
 	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
@@ -127,6 +130,9 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 	 */
 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 		trace_softirqs_off(ip);
+
+	enabled = local_softirq_enabled();
+	softirq_enabled_nand(mask);
 	raw_local_irq_restore(flags);
 
 	if (preempt_count() == cnt) {
@@ -135,6 +141,7 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 #endif
 		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 	}
+	return enabled;
 }
 EXPORT_SYMBOL(__local_bh_disable_ip);
 #endif /* CONFIG_TRACE_IRQFLAGS */
@@ -143,11 +150,13 @@ EXPORT_SYMBOL(__local_bh_disable_ip);
  * Special-case - softirqs can safely be enabled by __do_softirq(),
  * without processing still-pending softirqs:
  */
-void local_bh_enable_no_softirq(void)
+void local_bh_enable_no_softirq(unsigned int bh)
 {
 	WARN_ON_ONCE(in_irq());
 	lockdep_assert_irqs_disabled();
 
+	softirq_enabled_set(bh);
+
 	if (preempt_count() == SOFTIRQ_DISABLE_OFFSET)
 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 
@@ -155,17 +164,18 @@ void local_bh_enable_no_softirq(void)
 		trace_softirqs_on(_RET_IP_);
 
 	__preempt_count_sub(SOFTIRQ_DISABLE_OFFSET);
-
 }
 EXPORT_SYMBOL(local_bh_enable_no_softirq);
 
-void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt, unsigned int bh)
 {
 	WARN_ON_ONCE(in_irq());
 	lockdep_assert_irqs_enabled();
 #ifdef CONFIG_TRACE_IRQFLAGS
 	local_irq_disable();
 #endif
+	softirq_enabled_set(bh);
+
 	/*
 	 * Are softirqs going to be turned on now:
 	 */
@@ -177,6 +187,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 	 */
 	preempt_count_sub(cnt - 1);
 
+
 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 		/*
 		 * Run softirq if any pending. And do it in its own stack
@@ -246,9 +257,6 @@ static void local_bh_exit(void)
 	__preempt_count_sub(SOFTIRQ_OFFSET);
 }
 
-
-
-
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
  * but break the loop if need_resched() is set or after 2 ms.
@@ -395,15 +403,17 @@ asmlinkage __visible void do_softirq(void)
  */
 void irq_enter(void)
 {
+	unsigned int bh;
+
 	rcu_irq_enter();
 	if (is_idle_task(current) && !in_interrupt()) {
 		/*
 		 * Prevent raise_softirq from needlessly waking up ksoftirqd
 		 * here, as softirq will be serviced on return from interrupt.
 		 */
-		local_bh_disable(SOFTIRQ_ALL_MASK);
+		bh = local_bh_disable(SOFTIRQ_ALL_MASK);
 		tick_irq_enter();
-		local_bh_enable_no_softirq();
+		local_bh_enable_no_softirq(bh);
 	}
 
 	__irq_enter();
-- 
2.7.4
