Message-Id: <20230801132441.559222-3-frederic@kernel.org>
Date: Tue, 1 Aug 2023 15:24:37 +0200
From: Frederic Weisbecker <frederic@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Frederic Weisbecker <frederic@kernel.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Peter Zijlstra <peterz@infradead.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	"Paul E. McKenney" <paulmck@kernel.org>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Anna-Maria Behnsen <anna-maria@linutronix.de>,
	Eric Dumazet <edumazet@google.com>
Subject: [RFC PATCH 2/6] softirq: Make softirq handling entry/exit generally available

In order to prepare for re-enabling softirqs from vector callbacks that
are known safe, make the code that increments the preempt count while
servicing softirqs more generally available.

No intended behaviour change.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 include/linux/bottom_half.h |  7 +++++++
 kernel/softirq.c            | 22 ++++++++++++++++------
 2 files changed, 23 insertions(+), 6 deletions(-)
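
Illustration, not part of the patch: the new pair is meant to be used
the same way the existing callers below use it, raising SOFTIRQ_OFFSET
on entry to softirq servicing and dropping it again on exit. A rough
sketch, where do_pending_work() is a made-up placeholder:

	/*
	 * Sketch only: enter softirq processing context, service some
	 * work, then leave again. local_bh_exit() also warns if we are
	 * somehow still in interrupt context afterwards.
	 */
	local_bh_enter();	/* preempt count += SOFTIRQ_OFFSET */
	do_pending_work();	/* placeholder for softirq servicing */
	local_bh_exit();	/* preempt count -= SOFTIRQ_OFFSET */
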
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index fc53e0ad56d9..2243c7de4917 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -33,6 +33,13 @@ static inline void local_bh_enable(void)
 	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
 }
 
+static inline void local_bh_enter(void)
+{
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+}
+
+extern void local_bh_exit(void);
+
 #ifdef CONFIG_PREEMPT_RT
 extern bool local_bh_blocked(void);
 #else
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1a3c3fe341ea..ba998d572ef4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -247,21 +247,26 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
+inline void local_bh_exit(void)
+{
+	__local_bh_enable(SOFTIRQ_OFFSET, true);
+	WARN_ON_ONCE(in_interrupt());
+}
+
 /*
  * Invoked from ksoftirqd_run() outside of the interrupt disabled section
  * to acquire the per CPU local lock for reentrancy protection.
  */
 static inline void ksoftirqd_run_begin(void)
 {
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+	local_bh_enter();
 	local_irq_disable();
 }
 
 /* Counterpart to ksoftirqd_run_begin() */
 static inline void ksoftirqd_run_end(void)
 {
-	__local_bh_enable(SOFTIRQ_OFFSET, true);
-	WARN_ON_ONCE(in_interrupt());
+	local_bh_exit();
 	local_irq_enable();
 }
 
@@ -389,15 +394,20 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
+inline void local_bh_exit(void)
+{
+	__local_bh_enable(SOFTIRQ_OFFSET);
+	WARN_ON_ONCE(in_interrupt());
+}
+
 static inline void softirq_handle_begin(void)
 {
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+	local_bh_enter();
 }
 
 static inline void softirq_handle_end(void)
 {
-	__local_bh_enable(SOFTIRQ_OFFSET);
-	WARN_ON_ONCE(in_interrupt());
+	local_bh_exit();
 }
 
 static inline void ksoftirqd_run_begin(void)
--
2.34.1